# coding=utf-8
import json
import logging
import re
import sys

import os

import time
from scrapy import Spider, Request

from scrapy_sinahealth import utils
from scrapy_sinahealth.customizers import WechatCustomizer
from scrapy_sinahealth.items import ScrapyHealthItem

# Python 2 only: re-expose sys.setdefaultencoding (hidden by site.py) and
# force the process default codec to UTF-8, so implicit str<->unicode
# conversions of the Chinese article text don't raise UnicodeDecodeError.
# NOTE(review): reload()/setdefaultencoding do not exist on Python 3 — this
# module is Python 2 specific (see also has_key usage below).
reload(sys)
sys.setdefaultencoding("utf-8")


class Wechat(Spider):
    """Spider for WeChat public-account articles.

    The list-page URLs are signed and short-lived, so they are reconstructed
    from raw HTTP request dumps captured with Fiddler (one file per account,
    named ``*_YYYY-MM-DD_request``, re-captured daily). Each list page yields
    requests for the individual article pages, which are parsed into
    ``ScrapyHealthItem`` objects.
    """

    name = "wechat"
    download_delay = 2
    # Directory holding the raw HTTP request files captured by Fiddler.
    start_url_list_file_path = "E:/FiddlerResponse"

    custom_settings = {
        'DOWNLOADER_MIDDLEWARES': {
            'scrapy_sinahealth.middlewares.WechatRandomUserAgentMiddleware': 501,
        },
        'ITEM_PIPELINES': {
            'scrapy_sinahealth.pipelines.ImageLoadPipeline': 600,
            'scrapy_sinahealth.pipelines.SQLStorePipeline': 610,
        },
    }

    def __init__(self):
        super(Wechat, self).__init__()
        self.customizer = WechatCustomizer()
        self.start_urls = self.get_wechat_public_articles_url_list()

    def get_wechat_public_articles_url_list(self):
        '''
        Rebuild today's WeChat public-account list URLs from the raw HTTP
        request files captured by Fiddler (the signed ``key``/``pass_ticket``
        query parameters expire, so the captures must be refreshed daily).

        Example of a captured request line:
        GET https://mp.weixin.qq.com/mp/profile_ext?action=home&__biz=... HTTP/1.1

        :return: list of absolute URLs to be used as ``start_urls``
        :raises Exception: when no request file for today exists, or a file
            contains a relative URL but no ``Host:`` header
        '''
        today = time.strftime('%Y-%m-%d')
        # Collect today's "<account>_<date>_request" dumps only.
        request_file_list = [
            os.path.join(self.start_url_list_file_path, file_name)
            for file_name in os.listdir(self.start_url_list_file_path)
            if file_name.endswith("_%s_request" % today)
        ]

        if not request_file_list:
            raise Exception("there is no request files in the path %s..." % self.start_url_list_file_path)

        # Renamed from "list": don't shadow the builtin.
        url_list = []
        request_protocol = "https"
        for request_file_path in request_file_list:
            host = None
            with open(request_file_path) as f:
                # First line of a raw HTTP request: "<METHOD> <URL> <PROTOCOL>".
                first_line = f.readline().strip()
                request_method, request_url, request_protocol_version = first_line.split(" ")
                # Scan the remaining headers for "Host:", needed when the
                # request line carries a relative URL.
                for header_line in f:
                    header_line = header_line.strip()
                    if header_line.startswith("Host: "):
                        host = header_line[len("Host: "):]
                        break

            if request_url.startswith(("http://", "https://")):
                url_list.append(request_url)
            elif host:
                url_list.append("%s://%s%s" % (request_protocol, host, request_url))
            else:
                # Previously this path raised an unhelpful NameError on an
                # unbound "host"; fail with an explicit message instead.
                raise Exception("no Host header found in request file %s" % request_file_path)
        return url_list

    def parse(self, response):
        '''
        Parse the list page of a WeChat public account and yield a request for
        every article: the main article plus its "multi" sub-articles.

        The page embeds the article metadata as a single-quoted, HTML-escaped
        JSON string in a ``var msgList = '...'`` javascript assignment.

        :param response: list-page response
        :raises Exception: when the msgList payload is absent (the captured
            list URL has expired)
        '''
        pattern = re.compile(r'var msgList *?= *?(\'.*\')', re.IGNORECASE)
        match = pattern.search(response.text)

        if match is None:
            logging.error("response is not valid, the plist url maybe expired...")
            logging.error(response.text)
            raise Exception("response is not valid, the plist url maybe expired...")

        # Un-escape the payload and drop the surrounding single quotes.
        msg_string = match.group(1).replace('&quot;', '"').strip("'")
        msg_list_json = json.loads(msg_string)
        for msg in msg_list_json['list']:
            app_msg_ext_info = msg['app_msg_ext_info']
            main_article_url = self.cleanup_url(app_msg_ext_info['content_url'])
            yield Request(main_article_url, callback=self.parse_content)

            for mmsg in app_msg_ext_info['multi_app_msg_item_list']:
                article_url = self.cleanup_url(mmsg['content_url'])
                yield Request(article_url, callback=self.parse_content)

    def replace_img_src(self, html, image_url, new_image_full_path):
        '''
        Point the lazy-loaded image tags at a locally stored copy.

        :param html: article HTML fragment
        :param image_url: original remote image URL to replace
        :param new_image_full_path: local path of the downloaded image
        :return: HTML with ``data-src`` renamed to ``src`` and the URL swapped
        '''
        html = html.replace('data-src', 'src')
        html = html.replace(image_url, new_image_full_path)
        return html

    def cleanup_url(self, url):
        '''
        Clean up a URL extracted from the embedded JSON structure by removing
        the javascript escape backslashes.

        :param url: raw URL string (e.g. ``http:\\/\\/mp.weixin.qq.com\\/...``)
        :return: URL with all backslashes removed
        '''
        return url.replace('\\', '')

    def parse_content(self, response):
        '''
        Parse the title, date, source, and body of one article page and yield
        it as a ``ScrapyHealthItem``.

        :param response: article-page response
        '''
        logging.info("processing content url %s..." % response.url)
        item = ScrapyHealthItem()

        item["url"] = response.url.strip()
        # dict.has_key() was removed in Python 3; "in" works on both versions.
        if "Referer" in response.request.headers:
            item["referer"] = response.request.headers['Referer'].strip()
        else:
            item["referer"] = ""

        item["spider"] = self.name
        item["category"] = ""
        item["tag"] = ""

        issue_date = utils.select_default(response, '//div[@id="img-content"]/div[@id="meta_content"]/em[@id="post-date"]/text()')
        item['issue_date'] = issue_date

        source = utils.select_default(response, '//div[@id="img-content"]/div[@id="meta_content"]/a[@id="post-user"]/text()')
        item['source'] = source

        title = utils.select_default(response, '//div[@id="img-content"]/h2[@id="activity-name"]/text()')
        item["title"] = title

        content = utils.select_default(response, '//div[@id="img-content"]/div[@id="js_content"]')
        item["content"] = content

        image_urls = utils.select_all(response, '//img/@data-src')
        item['image_urls'] = image_urls

        yield item
