# -*- coding: utf-8 -*-
import time
import urllib
import json
import pydash
import MySQLdb as mdb
import scrapy

from ..items import CBookArticleInfo


class WeixinSpider(scrapy.Spider):
    """Incrementally crawl article lists and article bodies from WeChat
    (mp.weixin.qq.com) public accounts.

    For each account in ``public_content_list`` the spider pages through the
    ``profile_ext`` list endpoint, yields a detail request for every article
    not yet present in the ``article_brief_info`` MySQL table, and follows
    ``frommsgid`` pagination while new articles keep appearing.
    """
    name = "weixin"
    allowed_domains = ["mp.weixin.qq.com"]
    start_urls = ['https://mp.weixin.qq.com/']

    # These three session-bound values may need to be replaced when the
    # WeChat session expires.
    # NOTE(review): credentials are hard-coded in source control; consider
    # moving them to Scrapy settings or environment variables.
    key = "e0f557c9f11098d158ca39fc87c15c672f4c877216b4b5e0effe5dcbc1eda22d8f42e02f74cd8650060ff627d2136ba03ab1fad364d2929a16ca5a815170f90a95bff12fbed6c35d50a4cd2ac4041221"
    pass_ticket = "7dbS/Wn4AeZPgUYrYL0J77IGihnAHDbrvbLUk8cglVh98YcXXeRre2vrB7gM0/Fb"
    wap_sid2 = 'CMX34fIJElwyV01NR0l5alM2ckZpX1lBLVM1QVZWS0YwSjZfNTU3eU1CbnVNTnlPVFdBc0RfYXBYQ2RDNUlnbm4tWnJmNEdYQ0dVdlFjOTR2ejhQemFhekJ6WXR5NHNEQUFBfjCnwNPJBTgMQJRO'

    # Accounts to crawl; 'biz' is the account's __biz identifier.
    public_content_list = [
        {'biz': 'MjM5MDI5OTkyOA==', 'name': '一条'}
        # {'biz': 'MjM5ODY0MzY2NQ==', 'name': u'书单号', 'avatar': 'http://wx.qlogo.cn/mmhead/Q3auHgzwzM56G7qFdwFDrKdUsW5sRmjOaic4lw2NiayUDhOIIr5kc1Og/0'}
        # {'biz': 'MzIyMjE4ODUxNQ==', 'name': '萨沙1912', 'avatar': 'http://wx.qlogo.cn/mmhead/Q3auHgzwzM5OghamDP2Oic8wl6Afu60r82JaianpMkW90gduW71Oqg0w/0'}
    ]

    # Base query-string parameters for the list endpoint. Never mutate this
    # dict directly -- take a per-request copy (see start_requests/parse_list).
    # (A duplicate 'f': 'json' entry was removed; dict literals keep only the
    # last occurrence anyway.)
    params = {
        'action': 'getmsg',
        'f': 'json',
        'count': 20,
        'scene': 123,
        'is_ok': 1,
        'uin': 'MjY1NjU5ODk4MQ==',
        'wxtoken': '',
        'x5': 0,
        'key': key,
        'pass_ticket': pass_ticket
    }

    cookies = {
        'wxuin': 2656598981,
        'pass_ticket': pass_ticket,
        'wap_sid2': wap_sid2
    }

    # MySQL connection; created lazily in start_requests().
    client = None

    def check_not_exist(self, custom_item_id):
        """Return True if no crawled row with this custom_item_id exists yet.

        Uses a parameterized query; the cursor is always closed, even when
        the query raises.
        """
        cursor = self.client.cursor()
        try:
            cursor.execute(
                'SELECT COUNT(*) FROM article_brief_info WHERE custom_item_id=%s',
                [custom_item_id])
            count = cursor.fetchone()
        finally:
            cursor.close()
        return count[0] == 0

    def create_list_url(self, params):
        """Build the article-list URL from the given query parameters."""
        return 'https://mp.weixin.qq.com/mp/profile_ext?' + \
            urllib.urlencode(params)

    def start_requests(self):
        """Open the DB connection and emit one list request per account."""
        self.client = mdb.connect(
            host='0.0.0.0',
            port=3306,
            user='root',
            passwd='ilove1388',
            db='cbook',
            charset='utf8'
        )
        self.client.autocommit(True)

        for public_content in self.public_content_list:
            # Copy the shared class-level dict: Scrapy callbacks run
            # asynchronously, so mutating self.params in place would race
            # between accounts/pages (the original aliased it directly).
            params = dict(self.params)
            params['__biz'] = public_content['biz']
            # frommsgid=0 requests the newest page of the account.
            params['frommsgid'] = 0

            yield scrapy.Request(self.create_list_url(params),
                                 callback=self.parse_list,
                                 cookies=self.cookies,
                                 meta={
                                     'biz': params['__biz'],
                                     'source': public_content['name']
                                 })

    def parse_list(self, response):
        """Parse one page of the article list.

        Yields a detail request for every unseen article, then follows
        pagination when the whole page was new (incremental stop condition).
        """
        content = json.loads(response.body_as_unicode())
        article_list = json.loads(content['general_msg_list'])

        now = int(time.time())

        scrapy_count = 0
        length = len(article_list['list'])

        for article in article_list['list']:
            # 'in' replaces the deprecated dict.has_key().
            if 'app_msg_ext_info' in article:
                article_info = CBookArticleInfo(
                    abstract=article['app_msg_ext_info']['digest'],
                    go_detail_count=None,
                    article_type='article',
                    comments_count=None,
                    channel='weixin',
                    cover_image_url=article['app_msg_ext_info']['cover'],
                    title=article['app_msg_ext_info']['title'],
                    # source=article['app_msg_ext_info']['author'],
                    source=response.meta['source'],
                    detail_url=article['app_msg_ext_info']['content_url'],
                    created_time=now,
                    update_time=now,
                    published_time=article['comm_msg_info']['datetime'],
                    custom_item_id='weixin_' + \
                    str(article['comm_msg_info']['id']),
                )

                if self.check_not_exist(article_info.get('custom_item_id')):
                    scrapy_count = scrapy_count + 1
                    yield scrapy.Request(article_info.get('detail_url'),
                                         callback=self.parse_content,
                                         cookies=self.cookies,
                                         meta={'article_info': article_info})
            else:
                # Entries without app_msg_ext_info don't count toward the
                # "whole page was new" check below.
                length = length - 1

        # Follow pagination only while every article on the page was new;
        # incremental runs stop at the first already-seen page.
        # NOTE(review): this needs further verification for incremental runs
        # (a page mixing seen and unseen articles stops pagination early).
        if content['can_msg_continue'] == 1 and scrapy_count == length and length > 0:
            # Per-request copy, same rationale as in start_requests().
            params = dict(self.params)
            params['__biz'] = response.meta['biz']

            # Continue from the oldest message id seen on this page.
            min_one = pydash.numerical.min_by(
                article_list['list'], 'comm_msg_info.id')
            params['frommsgid'] = min_one['comm_msg_info']['id']

            yield scrapy.Request(self.create_list_url(params),
                                 callback=self.parse_list,
                                 cookies=self.cookies,
                                 meta=response.meta)

    def parse_content(self, response):
        """Attach the article HTML body to the item and emit it."""
        article_info = response.meta['article_info']

        # '#img-content' wraps the rendered article body on WeChat pages.
        article_info['context'] = response.css('#img-content').extract_first()

        yield article_info
