# -*- coding: utf-8 -*-
# @Time    : 2020/2/13 10:33
# @Author  : Damn7Kx
# @Software: PyCharm
import datetime
import json
import os
from urllib.parse import urlencode
import scrapy
from NewsSpider.items import NewsItem
from NewsSpider.tools.utils import Utils
from NewsSpider.tools.filter_time import Times
from NewsSpider.tools.redis_db import Redis_DB


class ZntjNews(scrapy.Spider):
    """Spider for the 奏耐天津 (Zounai Tianjin) news app API.

    Every API call is signed with ``Utils.md5_encrypt`` over the sorted
    parameter *values* plus an auth token cached on disk (``zntj.json``).
    When the API returns a non-zero ``code`` the token is refreshed
    (``parse_json`` rewrites the cache file) and the list page retried.
    """
    name = 'zntj'
    # Forum ids to crawl.
    # NOTE(review): '3' appears twice, so that forum is requested twice
    # per run (requests use dont_filter=True) — confirm intentional.
    types = ['1', '7', '3', '12', '8', '11', '2', '3', '4', '5', '6',
             '9', '10', '20', '56']

    t = Times()
    redis = Redis_DB()

    custom_settings = {
        'DOWNLOADER_MIDDLEWARES': {
            'NewsSpider.middlewares.ProxyIPMiddleware': 544,
            'NewsSpider.middlewares.RandomUserAgentMiddleware': 543,
        },
        'ITEM_PIPELINES': {
            'NewsSpider.pipelines.KafkaPipeline': 544,
        }
    }

    # Token cache file; written by parse_json() after each refresh.
    filepath = os.path.dirname(os.getcwd()) + "/NewsSpider/tools/zntj.json"

    def _read_token(self):
        """Return the auth token currently cached in the on-disk JSON file."""
        with open(self.filepath, 'r', encoding='utf-8') as f:
            return json.load(f)['data']['token']

    def _build_list_url(self, forum_id, refresh):
        """Build a signed thread-list URL for ``forum_id``, page ``refresh``.

        The signature is the MD5 of the parameter values concatenated in
        sorted order (the API's scheme), appended as an extra query arg.
        """
        params = {'type': '1', 'refresh': str(refresh),
                  'forum_id': str(forum_id), 'token': self._read_token()}
        signature = Utils.md5_encrypt(''.join(sorted(params.values())))
        return (f'https://api.zounai.com/v2/forum/thread?'
                f'{urlencode(params)}&signature={signature}')

    def start_requests(self):
        """Seed one first-page list request per configured forum id."""
        for forum_id in self.types:
            yield scrapy.Request(self._build_list_url(forum_id, 1),
                                 callback=self.parse_text, dont_filter=True,
                                 meta={"type": forum_id, 'num': 1})

    def parse_text(self, response):
        """Handle a thread-list page.

        On an API error code: trigger a token refresh and retry this page.
        On success: yield one detail request per not-yet-seen article and
        paginate up to page 3.
        """
        print("正在访问列表页:", response.url)
        data_ = json.loads(response.text)
        forum_id = response.meta['type']
        num = response.meta['num']
        if data_['code'] != 0:
            # Token presumably expired: POST a refresh request; parse_json()
            # writes the response back into the cache file.
            token = self._read_token()
            params = {'appid': 'dKSXxi1LN9jcfbxJ',
                      'appsecret': 'xTzwemnZIRebS1Ny5E08qQgfjv6Jvjl4',
                      'expire_token': token}
            signature = Utils.md5_encrypt(''.join(sorted(params.values())))
            params['signature'] = signature
            posturl = f'https://api.zounai.com/v2/refresh_token?{urlencode(params)}'
            yield scrapy.Request(posturl, callback=self.parse_json,
                                 method='POST', dont_filter=True)
            # NOTE(review): this retry URL is signed with the token read
            # *before* the refresh response lands, so it may still carry
            # the stale token and loop once more — confirm the intended
            # flow is "retry until the refreshed token is on disk".
            yield scrapy.Request(self._build_list_url(forum_id, num),
                                 callback=self.parse_text, dont_filter=True,
                                 meta={"type": forum_id, 'num': 1})
        else:
            for d in data_['data']:
                url = d['share_url']
                doc_id = Utils.url_hash(url)
                # Dedup first: skip articles already recorded in Redis
                # before doing any further per-item work.
                if self.redis.check_exist_2("wenzhangquchong", doc_id, '') == 0:
                    print('parse_text id:%s已存在' % doc_id)
                    continue
                content = d['first_sentence']
                meta = {
                    'url': url,
                    'pubdate': Utils.process_timestamp(d['create_time']),
                    'author': d['username'],
                    'content': content,
                    'id': doc_id,
                    # Title is the first 15 chars of the snippet.
                    'title': content[:15],
                }
                params_ = {'thread_id': str(d['id']),
                           'token': self._read_token()}
                signature = Utils.md5_encrypt(''.join(sorted(params_.values())))
                content_url = (f'https://api.zounai.com/v2/forum/thread/detail?'
                               f'{urlencode(params_)}&signature={signature}')
                yield scrapy.Request(content_url, callback=self.parse,
                                     dont_filter=True, meta=meta)
            # Crawl at most the first 3 pages of each forum.
            if num < 3:
                yield scrapy.Request(self._build_list_url(forum_id, num + 1),
                                     callback=self.parse_text, dont_filter=True,
                                     meta={"type": forum_id, 'num': num + 1})

    def parse(self, response):
        """Turn an article-detail response into a NewsItem.

        Falls back to the list-page snippet (carried in meta) when the
        detail payload has no body text.
        """
        item = NewsItem()
        data = json.loads(response.text)['data']
        try:
            content = data['content']['main']['word']
        except (KeyError, TypeError, IndexError):
            # Detail payload lacks the expected body structure; use the
            # first-sentence snippet forwarded from the list page.
            content = response.meta['content']
        item['id'] = response.meta['id']
        item['url'] = response.meta['url']
        item['title'] = response.meta['title']
        item['pubdate'] = response.meta['pubdate']
        item['content'] = content
        item['author'] = response.meta['author']
        item['formats'] = "app"
        item['dataSource'] = '奏耐天津APP'
        item['serchEnType'] = "奏耐天津"
        item['html'] = content
        item['updateTime'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        item['collectProcess'] = 'crawl_news'
        item['serverIp'] = "113.128.12.74"
        yield item

    def parse_json(self, response):
        """Persist the token-refresh response so later requests pick it up."""
        with open(self.filepath, 'w', encoding='utf-8') as f:
            f.write(response.text)