#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Description: 软件信息爬虫"""

import urllib2
import urllib
import time
import datetime

from contact.common.spiderutil.spider import Spider
from contact.common.http import get_http_response
from contact.common import json

class SoftInfoSpider(object):
    """软件信息爬虫"""
    def __init__(self, template_id):
        self.template_id = template_id
    
    def _get_template(self):
        url = 'http://mobilecontactonline.appspot.com/soft/spider/template/'
#        url = 'http://127.0.0.1:8080/soft/spider/template/'
        while True:
            try:
                response = urllib2.urlopen(url)
                tpl = json.loads(response.read())
                return tpl
            except Exception, e:
                print e
                time.sleep(3)
        
    def _save_softinfo(self, info):
        post_url = 'http://mobilecontactonline.appspot.com/soft/info/save/'
#        post_url = 'http://127.0.0.1:8080/soft/info/save/'
        if info['info'] != '{}':
            print 'Soft', info['name'], info['url']
        for k, v in info.iteritems():
            if isinstance(v, unicode):
                info[k] = v.encode('utf-8')
        data = urllib.urlencode(info)
        while True:
            try:
                response = urllib2.urlopen(post_url, data)
                if response.read() == '1':
                    break
            except Exception, e:
                print e
                time.sleep(3)
        
    def _save_downurls(self, urlinfos):
        post_url = 'http://mobilecontactonline.appspot.com/soft/downurl/save/'
#        post_url = 'http://127.0.0.1:8080/soft/downurl/save/'
        data = urllib.urlencode({'downurls': json.dumps(urlinfos)})
        while True:
            try:
                response = urllib2.urlopen(post_url, data)
                if response.read() == '1':
                    break
            except Exception, e:
                print e
                time.sleep(3)
                
    def _finish_spider(self, key):
        url = 'http://mobilecontactonline.appspot.com/soft/spider/template/finish/%s/' % key
        while True:
            try:
                response = urllib2.urlopen(url)
                if response.read() == '1':
                    break
            except Exception, e:
                print e
                time.sleep(3)
    
    def run(self):
        while True:
            self.start_grap()
            print 'sleep 10 seconds'
    
    def start_grap(self):
        template = self._get_template()
        if not template:
            return
        settings = eval(template['settings'].replace('\r\n', '\n'))
        self._spider = Spider(template['start_url'], settings)
        _last_softinfo_url = None
        for level, url, parenturl, response, content, \
                downurls, softinfo in self._spider.walk():
            time_tuple = response.headers.getdate('last-modified')
            if time_tuple:
                last_modified = datetime.datetime(*time_tuple[:6]) # gmt时间，和我们相差8个时区
            else:
                last_modified = datetime.datetime.utcnow()
            if softinfo:
                _last_softinfo_url = url
                info = {'info': json.dumps(softinfo),
                        'url': url,
                        'parent_url': parenturl,
                        'name': softinfo.get('name', ''),
                        'status': response.code,
                        'last_modified': last_modified.strftime('%Y-%m-%d %H:%M:%S')}
                self._save_softinfo(info)
            if downurls:
                assert _last_softinfo_url, '%s %s' % (url, parenturl)
#                if _last_softinfo_url is None:
#                    _last_softinfo_url = parenturl
                downurl_infos = []
                un_anti_navigate_count = 0
                for downurl in downurls: # 获取下载url的last-modified
                    need_add = True
                    data = {'downurl': downurl, 
                            'realurl': downurl,
                            'referer': url,
                            'parent_url': _last_softinfo_url, 
                            'status': 200,
                            'filesize': 0,
                            'last_modified': datetime.datetime.utcnow(),
                            'anti_navigate': False,
                            }
                    if downurl.startswith('http://'):
                        response = get_http_response(downurl, url)
                        if response is not None and self._is_file(response): # 过滤无用链接
                            t = response.headers.getdate('last-modified')
                            if t:
                                last_modified = datetime.datetime(*t[:6])
                            else:
                                last_modified = datetime.datetime.utcnow()
                            data['realurl'] = response.url
                            data['last_modified'] = last_modified
                            data['status'] = getattr(response, 'code', 200) # 有时候会没有code
                            data['filesize'] = int(response.headers.get('Content-Length', 0))
                            data['anti_navigate'] = \
                                self._check_anti_navigate(data['realurl'], data['filesize'])
                        else:
                            need_add = False
                    if need_add:
                        print '==> anti_navigate', data['anti_navigate']
                        print '         ', data['realurl'], _last_softinfo_url
                        downurl_infos.append(data)
                        if not data['anti_navigate']:
                            un_anti_navigate_count += 1
                            if un_anti_navigate_count > 1: # 找到2个防盗链的链接即可
                                break
                self._save_downurls(downurl_infos)
#                _last_softinfo_url = None
        self._finish_spider(template['key'])
    
    def _check_anti_navigate(self, downurl, filesize):
        """检测downurl是否防盗链"""
        if not downurl.startswith('http://'): # 只检测http的
            return False
        response = get_http_response(downurl)
        return response is None or not self._is_file(response) or \
            int(response.headers.get('Content-Length', 0)) != filesize
#    
    def _is_file(self, response):
        """根据http响应，判断是否文件类型"""
        content_type = response.headers.get('content-type', '').lower()
        if 'text/html' in content_type:
            return False
        return True

#    def _convert_to_datetime(self, datetime_str):
#        if isinstance(datetime_str, basestring):
#            d = None
#            try:
#                d = datetime.datetime.strptime(datetime_str, '%Y-%m-%d')
#            except:
#                d = None
#            if d is None:
#                try:
#                    d = datetime.datetime.strptime(datetime_str, '%Y-%m-%d %H:%M:%S')
#                except:
#                    d = None
#            if d is None:
#                try:
#                    d = datetime.datetime.strptime(datetime_str, '%Y-%m-%d %H:%M')
#                except:
#                    d = None
#            if d is None:
#                try:
#                    d = datetime.datetime.strptime(datetime_str, '%d. %B %Y')
#                except:
#                    d = None
#            if d is not None:
#                return d
#        return datetime.datetime.now()
        

if __name__ == '__main__':
    # The template id is not consulted by the spider; any value works.
    SoftInfoSpider(123).run()