# coding=utf-8

'''
 python2 /work/spider/wdm.py  109806 '{"url": "https://vanke.com/news/data?typeid=1&newsid=6716", "meta": {"list_title": "\u6e05\u534e\u3001\u54c8\u4f5b\u8054\u5408\u53d1\u8d77\u5168\u7403\u516c\u5171\u536b\u751f\u5b66\u9662\u9662\u957f\u8054\u76df", "__bool_keys__": [], "__detail_url__": "http://vanke.com/news/data?newsid=6716&typeid=1", "list_time": "2022.04.24", "__datetime_keys__": []}}' auto_check:hash_try_task_content
'''

import sys
import redis
import imp
import types
reload(sys)
sys.setdefaultencoding('utf-8')
import spider
import copy
import time
import re
import htmlparser
import datetime
from urlparse import urljoin
import json
from urllib import unquote


class MySpider(spider.Spider):
    # Thin wrapper over the crawler framework's base Spider; instantiated
    # once below as a module-level helper.
    # NOTE(review): `proxy_enable` is accepted but never forwarded to the
    # base class -- presumably callers flip the attribute afterwards (as the
    # slow-download retry in get_detail_result does); confirm before relying
    # on this parameter.
    def __init__(self, cmd_args=None,proxy_enable=False):
        spider.Spider.__init__(self, cmd_args=cmd_args)

# Module-level spider instance, initialised at import time.
# NOTE(review): this rebinds the name `spider`, shadowing the imported
# `spider` module object from here on -- confirm nothing later in the file
# still needs the module itself rather than this instance.
spider = MySpider()
# spider.proxy_enable = False
spider.init_dedup()
spider.init_downloader()

def _relax_time_limits(config_content):
    """Widen every time-window expression in config source to days=10000
    (~27 years), effectively disabling time-based filtering."""
    config_content = re.sub(r'(?is)datetime\.timedelta\(\s*days\s*=\s*\d+\s*\)',
                            'datetime.timedelta(days=10000)', config_content)
    config_content = re.sub(r'(?is)(?P<time_limit>\S+?)\s*=\s*datetime\.datetime\.utcnow\(\)\s{0,2}-\s{0,2}\S+',
                            r'\g<time_limit> = datetime.datetime.utcnow() - datetime.timedelta(days=10000)',
                            config_content)
    config_content = re.sub(r'(?is)(?P<time_limit>\S+?)\s*=\s*self\.gtime\s{0,2}-\s{0,2}\S+',
                            r'\g<time_limit> = datetime.datetime.utcnow() - datetime.timedelta(days=10000)',
                            config_content)
    return config_content

def setConfigureContent(config_content,content_base=False,max_time=False):
    """Rewrite parts of a spider config's source code so historical detail
    pages can be re-crawled: widen time windows, strip url-dedup guards,
    and neutralise time comparisons.

    config_content -- config source code as a string
    content_base   -- when True (and max_time falsy), only strip the
                      'if <var> in self....: continue' dedup guards
    max_time       -- when truthy, only widen the time windows
    Returns the rewritten source string.

    The original version also compiled two time-comparison patterns whose
    results were never used; that dead code has been removed.
    """
    if max_time:
        return _relax_time_limits(config_content)
    if content_base:
        # Drop dedup guards of the form: if url in self.<seen>: ... continue
        return re.sub(r'(?is)if\s*\w+\s*in\s*self\..*?\:.*?continue', '', config_content)
    # Default: widen time windows, drop dedup guards, and turn time
    # comparisons (e.g. "if ctime < time_limit:") into equality tests,
    # which will rarely match.
    config_content = _relax_time_limits(config_content)
    config_content = re.sub(r'(?is)if\s*\w+\s*in\s*self\..*?\:.*?continue', '', config_content)
    config_content = re.sub(r'(?i)(?P<start>if.*?time.*?)[<>]=*(?P<end>\s*[a-z]+.*?):',
                            r'\g<start>==\g<end>:', config_content)
    return config_content

def load_module(config_content, module_name):
    """Compile config source text and import it as a fresh in-memory module.

    config_content -- python source code of a spider config
    module_name    -- __name__ to give the new module
    Returns the populated module object.
    """
    code = compile(config_content, '', 'exec')
    # types.ModuleType replaces the deprecated imp.new_module() (removed in
    # python 3.12); the call form of exec works under python2 and python3.
    module = types.ModuleType(module_name)
    exec(code, module.__dict__)
    return module

def result_check(result_dict):
    """Sanity-check a crawl result and colour-tag suspect fields for QA.

    Mutates result_dict in place, adding
        result_dict['colour_keys'] = {'red_key': [...], 'yellow_key': [...]}
    where red keys look missing / mistyped / clearly wrong and yellow keys
    merely look suspicious.  Returns the same (mutated) dict.
    """
    # Fields that must exist and be non-empty.
    necessary_keys = ['title','ctime','gtime','source','siteName','channel','url','content']
    # Expected value types, by field name.
    # NOTE(review): under python2 parsed values are frequently unicode, which
    # would fail the isinstance(..., str) checks below -- confirm values are
    # utf-8 str by this point (sys.setdefaultencoding('utf-8') suggests so).
    str_keys = ['title','source','retweeted_source','retweeted_status_url','url','content','content_xml','siteName','channel','face_img','__index_suffix','list_page_url']
    list_str_keys = ['pic_base64','pic_urls','video_urls','audio_urls']
    list_dict_keys = ['visitCount','replyCount']
    digit_keys = ['__no_time_filter','is_junk']
    datetime_keys = ['ctime','gtime','self.gtime']
    # Whitelist of recognised result keys; anything else is tagged yellow.
    total_keys = ['base_check','site_Name', 'site_domain', 'info_flag', 'time_check', 'title', 'source', 'retweeted_source', 'retweeted_status_url', 'url', 'content','content_xml', 'siteName', 'channel', 'face_img', '__index_suffix','list_page_url', 'pic_base64', 'pic_urls', 'video_urls', 'audio_urls', 'visitCount', 'replyCount', '__no_time_filter', 'is_junk','ctime','gtime','self.gtime']
    red_key = []
    # These keys are always flagged for a human look whenever present.
    yellow_key = ['__no_time_filter','is_junk','__index_suffix']
    url = result_dict.get('url')
    for necessary_key in necessary_keys:
        if not result_dict.get(necessary_key):
            red_key.append(necessary_key)

    for item_key,item_value in result_dict.items():
        # Unknown key, empty value, or a value identical to its own key name
        # (placeholder) -> yellow.
        if item_key not in total_keys or \
            (item_key not in [] and not item_value) or \
            item_key==item_value:
            yellow_key.append(item_key)
            continue

        # Wrong type for a known key -> red.
        if item_key in str_keys and not isinstance(item_value,str) or \
            item_key in list_str_keys and not isinstance(item_value,list) or \
            item_key in list_dict_keys and not isinstance(item_value,list) or \
            item_key in digit_keys and not isinstance(item_value,int) or \
            item_key in datetime_keys and not isinstance(item_value,type(datetime.datetime.utcnow())):
            red_key.append(item_key)
            continue

        # Media url lists: each element must be a non-empty string, must not
        # merely repeat the page url, and base64 payloads must only appear
        # under 'pic_base64'.
        if item_key in list_str_keys:
            for item_value_item in item_value:
                if not isinstance(item_value_item,str):
                    red_key.append(item_key)
                    break
                if not item_value_item or item_value_item==url:
                    red_key.append(item_key)
                    break
                if item_key == 'pic_urls' and re.findall('(?is)^data\s*:.{0,20}base64\s*,',item_value_item) or \
                    item_key == 'pic_base64' and not re.findall('(?is)^data\s*:.{0,20}base64\s*,',item_value_item):
                    red_key.append(item_key)
                    break

        # Counter lists: each element must be a dict with a non-negative
        # integer 'count'.
        if item_key in list_dict_keys:
            for item_value_item in item_value:
                if not isinstance(item_value_item,dict):
                    red_key.append(item_key)
                    break
                if not isinstance(item_value_item.get('count'),int) or item_value_item.get('count')<0:
                    red_key.append(item_key)
                    break

    # Publish time at/after fetch time usually means a mis-parsed ctime.
    try:
        if result_dict.get('ctime')>=result_dict.get('gtime')-datetime.timedelta(minutes=1):
            red_key.append('ctime')
    except:
        red_key.extend(['ctime','gtime'])

    # 'self.gtime' is the config's own fetch time; gtime must not echo it.
    try:
        if result_dict.get('gtime') == result_dict.get('self.gtime'):
            red_key.append('gtime')
    except:
        red_key.append('gtime')

    for item_key in ['channel','source','retweeted_source']:
        try:
            item_value = result_dict.get(item_key)
            if item_key == 'channel' and item_value.lower() in ['home','首页']:
                red_key.append(item_key)
            elif item_value==result_dict.get("site_Name"):
                yellow_key.append(item_key)
            elif (item_value.count(':') or item_value.count('：')) or \
                re.findall('\\n|\\r|\\t|\\v',item_value) or re.findall('^\s+|\s+$',item_value) or \
                re.findall('&[a-z]{2,5};',item_value,re.I) or re.findall('[\<\>\-\\\\]',item_value):
                yellow_key.append(item_key)
            elif (item_key == 'source' and ('作者' in item_value or '编辑' in item_value)) or (item_key == 'retweeted_source' and '来源' in item_value):
                yellow_key.append(item_key)
        except:
            red_key.append(item_key)

    if result_dict.get('site_Name','').count('-') or result_dict.get('site_Name','').count(' '):
        yellow_key.append('site_Name')

    try:
        if result_dict.get('siteName','').count(' ') or result_dict.get('siteName','')=='{}-{}'.format(result_dict.get('site_Name',''),result_dict.get('site_Name','')):
            yellow_key.append('siteName')
    except:
        red_key.append('siteName')

    try:
        time_check_num = re.findall('时限为(\d+)天',result_dict.get('time_check'))
        if time_check_num and int(time_check_num[0])>7:
            red_key.append('time_check')
        # base_check is stored as the string '1' (see get_detail_result) and
        # the crawl time lives under 'ctime' -- the original compared against
        # the int 1 and a nonexistent 'c_time' key, so this branch could
        # never fire; fixed here.
        elif result_dict.get('base_check') == '1' and result_dict.get('ctime') < result_dict.get('gtime')-datetime.timedelta(days=7):
            red_key.append('time_check')
            result_dict.update({'time_check':'配置时间过滤异常'})
        elif '有冗余时间过滤' in result_dict.get('time_check'):
            yellow_key.append('time_check')
        elif not time_check_num:
            red_key.append('time_check')
        elif 'spider.Spider模板未做标题过滤' in result_dict.get('time_check'):
            red_key.append('time_check')
    except:
        red_key.append('time_check')

    # Interaction-style results (info_flag '02') should carry counters;
    # substitute placeholders so QA can spot the gap.
    if result_dict.get('info_flag')=='02':
        if not result_dict.get('visitCount'):
            yellow_key.append('visitCount')
            result_dict.update({'visitCount':'visitCount'})
        if not result_dict.get('replyCount'):
            yellow_key.append('replyCount')
            result_dict.update({'replyCount':'replyCount'})

    result_dict.update({'colour_keys':{'red_key':list(set(red_key)),'yellow_key':list(set(yellow_key))}})
    return result_dict

def get_detail_result(config_id,task_url,redis_content_keys):
    """Re-crawl one detail page with the config stored in redis and return
    the parsed result dict ({} when nothing could be parsed).

    config_id          -- hash field in redis holding the config's source code
    task_url           -- task payload: a JSON string with 'url' and 'meta',
                          or a bare url string
    redis_content_keys -- redis hash key the config source is read from
    """
    url_db = redis.StrictRedis.from_url('redis://redis-spider-cooperation-ZBMBBG.istarshine.net.cn/2')
    config_content_base = url_db.hget(redis_content_keys,config_id)
    # For auto-check runs, also fetch the task's original info_flag so the
    # result can report "old config vs new config" at the end.
    if 'auto_check:' in redis_content_keys:
        task_id =  url_db.hget('auto_check:hash_config_task_ids',config_id)
        if task_id:
            base_info_flag = url_db.hget('auto_check:hash_config_base_infos',task_id)
        else:
            base_info_flag = ''
    else:
        base_info_flag = ''
    # CC1 runs the config as-is; config_content_change (time filters relaxed
    # by setConfigureContent) is used for static checks and the CC2 fallback.
    config_content_change = setConfigureContent(config_content_base)
    config_base1 = load_module(config_content_base,'CC1')
    CC1 = config_base1.MySpider()
    if 'gtime' not in dir(CC1):
        CC1.gtime = datetime.datetime.utcnow()

    # Identify which spider template the config is built on.
    if 'class MySpider(spider.Spider):' in config_content_base:
        config_model = 'spider.Spider'
    elif 'class MySpider(spiderDefault.Spider):' in config_content_base:
        config_model = 'spiderDefault.Spider'
    else:
        # Unknown template -- nothing sensible to check.
        return {}
    try:
        # Static source inspection: does parse_detail_page contain a
        # ctime-based filter and a title guard?
        time_check = ''
        time_check_code = re.search('(?is)def\s+parse_detail_page.*?\(\s*self\s*,.+?\\n\s*if\s*ctime\s*==\s*\S+?\s*:.*?\\n\s*return',config_content_change)
        title_check_code = re.search('(?is)def\s+parse_detail_page.*?\s+if\s*not\s*title\s*\:.{0,20}\\n\s*return',config_content_change)
        # print [config_content_change]
        # print re.findall('(?is)def\s+parse_detail_page.*?\(\s*if\s*not\s*title\s*\:\\n\s*return',config_content_change)
        if config_model=='spider.Spider' and time_check_code:
            c_time_num = '%s'%(CC1.gtime+datetime.timedelta(minutes=1)-CC1.c_time).days
            time_check = '有时间过滤，时限为%s天'%c_time_num
        elif config_model=='spiderDefault.Spider' and time_check_code:
            c_time_num = '%s'%CC1.max_interval.days
            time_check = '有冗余时间过滤，时限为%s天'%c_time_num
        elif config_model=='spiderDefault.Spider' and not time_check_code:
            c_time_num = '%s'%CC1.max_interval.days
            time_check = '有时间过滤，时限为%s天'%c_time_num
        else:
            time_check = '无时间过滤'

        if config_model=='spider.Spider' and not title_check_code:
            time_check += 'spider.Spider模板未做标题过滤'
    except:
        time_check = '时间过滤异常'

    # Disable url dedup so the detail page is always re-fetched.
    CC1.dedup_uri = None
    # CC1.c_time = datetime.datetime.utcnow() - datetime.timedelta(days=10000)
    # CC1.max_interval = datetime.timedelta(days=10000)

    CC1.init_dedup()
    CC1.init_downloader()
    # Some target sites have broken certificates; skip TLS verification.
    CC1.downloader.session.verify=False
    try:
        CC1.get_start_urls()
    except:
        # Some configs define get_start_urls with an extra argument.
        CC1.get_start_urls(None)
    try:
        #try_task_url = task_url.replace('\\','')
        #task_url = eval(try_task_url)
        # Rebuild the meta dict: restore datetimes/bools that were
        # stringified for transport, and re-encode unicode values.
        task_url = json.loads(task_url)
        task_url['url'] = str(task_url.get('url'))
        datetime_keys = task_url.get('meta',{}).get('__datetime_keys__',[])
        bool_keys = task_url.get('meta',{}).get('__bool_keys__',[])
        detail_url = task_url.get('meta',{}).get('__detail_url__','')
        total_keys = task_url.get('meta',{}).keys()
        for item_key in total_keys:
            if item_key in datetime_keys:
                item_str_value = task_url.get('meta',{}).get(item_key,'')
                try:
                    item_value = datetime.datetime.strptime(item_str_value,'%Y-%m-%d %H:%M:%S')
                    task_url['meta'].update({item_key:item_value})
                except:
                    pass
            elif item_key in bool_keys:
                item_str_value = task_url.get('meta',{}).get(item_key,'')
                try:
                    item_value = eval(item_str_value)
                    task_url['meta'].update({item_key:item_value})
                except:
                    pass
            else:
                if isinstance(task_url.get('meta',{}).get(item_key,''),unicode):
                    task_url.get('meta',{})[item_key]=str(task_url.get('meta',{}).get(item_key,''))
    except:
        # Not JSON -- treat the raw argument as the detail url itself.
        detail_url = task_url
        pass

    # Cached result from a previous crawl of this detail url, if any.
    if detail_url:
        task_result = url_db.hget('auto_check_add:hash_url_infos',detail_url)
        if task_result:
            task_result = json.loads(task_result)
    else:
        task_result = {}

    # Normalise to both the dict form (for download/parse) and a url string
    # usable for urljoin.
    if isinstance(task_url,dict):
        task_url_dict = task_url
        join_url = task_url_dict.get('url')
        download_url = task_url
        download_url.update({'timeout':3})
    else:
        task_url_dict = {'url':task_url,'meta':{'list_page_url':'list_page_url'}}
        join_url = task_url
        download_url={'url':task_url,'timeout':3,'meta':{'list_page_url':'list_page_url'}}

    start_timing = int(time.time())
    resp = CC1.download(download_url)

    # Slow download: retry once through the proxy pool.
    if int(time.time())-start_timing>=10:#resp.elapsed.total_seconds() >= 10:
        CC1.proxy_enable = True
        CC1.proxy_url = 'http://192.168.132.81/qingting_proxy_https.txt'
        CC1.init_dedup()
        CC1.init_downloader()
        resp = CC1.download(download_url)
    # NOTE(review): this elif repeats the condition of the `if` above, so it
    # can never be reached -- the cached task_result fallback is dead code.
    elif int(time.time())-start_timing>=10 and task_result:#resp.elapsed.total_seconds() >= 10 and task_result:
        result = task_result
    # NOTE(review): `result` is only bound inside this `if`; a falsy resp
    # makes the `if result:` below raise NameError -- confirm download()
    # always returns something truthy on this path.
    if resp:
        try:
            result = CC1.parse_detail_page(resp,task_url)
        except:
            # Some configs expect the plain dict form of the task.
            result = CC1.parse_detail_page(resp, task_url_dict)

    if result:
        result = result[0]
        base_check = '1'
    else:
        # The unmodified config produced nothing; retry with the relaxed
        # config (time filters and dedup stripped by setConfigureContent).
        config_base2 = load_module(config_content_change,'CC2')
        CC2 = config_base2.MySpider()
        CC2.dedup_uri = None
        CC2.c_time = datetime.datetime.utcnow() - datetime.timedelta(days=10000)
        CC2.max_interval = datetime.timedelta(days=10000)
        CC2.init_dedup()
        CC2.init_downloader()
        try:
            CC2.get_start_urls()
        except:
            CC2.get_start_urls(None)

        try:
            result = CC2.parse_detail_page(resp,task_url)
        except :
            result = CC2.parse_detail_page(resp, task_url_dict)
        if result:
            result = result[0]
            base_check = '2'
        else:
            return {}

    # Absolutize media links inside content_xml so they match the parsed
    # pic/video/audio url lists.
    pic_urls = result.get("pic_urls",[])
    video_urls = result.get("video_urls",[])
    audio_urls = result.get("audio_urls",[])
    media_list_result = pic_urls+video_urls+audio_urls
    if result.get("content_xml") and media_list_result:
        content_xml = result.get("content_xml","")
        media_list = re.findall('''(?i)[\'\"]*\s*url{3,4}\s*[\'\"]*\s*[:=]\s*[\'\"]\s*(\S+?)\s*[\'\"]''',content_xml) + \
                     re.findall('''(?i)[\'\"]*\s*link\s*[\'\"]*\s*[:=]\s*[\'\"]\s*(\S+?)\s*[\'\"]''',content_xml) + \
                     re.findall('''(?i)open\s*\(\s*[\'\"]\s*(\S+?)\s*[\'\"]''',content_xml) + \
                     re.findall('''(?i)[openurlnavigate]{7,8}\s*\(\s*[\'\"]\s*(\S+?)\s*[\'\"]''',content_xml) + \
                     re.findall('''(?i)\.location\s*=\s*[\'\"]\s*(\S+?)\s*[\'\"]''',content_xml)+ \
                     re.findall('''(?i)[a\s][\'\"]*\s*href\s*[\'\"]*\s*[:=]\s*[\'\"]\s*(\S+?)\s*[\'\"]''',content_xml) + \
                     re.findall('''(?i)[a\s][\'\"]*\s*data-src\s*[\'\"]*\s*[:=]\s*[\'\"]\s*(\S+?)\s*[\'\"]''',content_xml) + \
                     re.findall('''(?i)[a\s][\'\"]*\s*src\s*[\'\"]*\s*[:=]\s*[\'\"]\s*(\S+?)\s*[\'\"]''',content_xml) + \
                     re.findall('''(?i)[a\s][\'\"]*\s*source\s*[\'\"]*\s*[:=]\s*[\'\"]\s*(\S+?)\s*[\'\"]''',content_xml)
        for i in media_list:
            med_url = urljoin(join_url,i)
            if med_url in media_list_result:
                content_xml = content_xml.replace(i,med_url)
        # NOTE(review): base_check only reaches the result when this
        # content_xml branch runs -- confirm that is intended.
        result.update({"content_xml" : content_xml,'base_check':base_check})
    result.update({"site_Name":CC1.siteName,"site_domain":CC1.site_domain,'time_check':time_check,'self.gtime':CC1.gtime+datetime.timedelta(minutes=1)})
    if base_info_flag:
        try:
            base_info_flag = json.loads(base_info_flag).get('info_flag','')
            if base_info_flag:
                result.update({"info_flag":"原配置：%s，现配置：%s"%(base_info_flag,CC1.info_flag)})
        except:
            result.update({"info_flag":"原配置：%s，现配置：%s"%(base_info_flag,CC1.info_flag)})
    else:
        result.update({"info_flag":CC1.info_flag})
    return result



class DateEncoder(json.JSONEncoder):
    """JSON encoder that serialises datetime/date values as
    'YYYY-MM-DD HH:MM:SS' strings (dates get a zero time part)."""
    def default(self, obj):
        # The datetime check must come first: datetime.datetime is a
        # subclass of datetime.date.
        if isinstance(obj, datetime.datetime):
            return obj.strftime('%Y-%m-%d %H:%M:%S')
        elif isinstance(obj, datetime.date):
            # The original referenced the bare name `date`, which is never
            # imported (only `import datetime` exists), so any non-datetime
            # object raised NameError instead of falling through; fixed.
            return obj.strftime("%Y-%m-%d 00:00:00")
        else:
            return json.JSONEncoder.default(self, obj)

# print sys.argv
# --- script entry point -------------------------------------------------
# Invoked as: python2 wdm.py <config_id> <url-quoted task json> <redis key>
# (see the example command in the module docstring).
config_id = sys.argv[1]
task_url = unquote(sys.argv[2])
redis_content_keys = sys.argv[3]
try:
    message = get_detail_result(config_id,task_url,redis_content_keys)
    if message:
        message = result_check(message)
    # The fenge markers let the calling process cut the JSON payload
    # out of stdout.
    print "####fenge_start####",json.dumps(message,cls=DateEncoder),"####fenge_end####"
except Exception,e:
    # NOTE(review): if get_detail_result raises before `message` is bound,
    # the final `print type(message)` below raises NameError -- confirm the
    # caller tolerates that.
    print "####fenge_error_start####",e,"####fenge_error_end####"
print type(message)
