#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time    : 2021/8/9 10:48
# @Author  : Samge
import json
import time

import scrapy

from itkz import token_util
from itkz.spider_ext.pipelines.itkz import ItkzDb
from itkz.spider_ext.items.itkz import ItkzItem
from itkz.spiders.base import BaseCommonSpider
from itkz.resources.utils import settings_util, bd_info_util

# JSON-literal aliases: raw response bodies use JSON's null/false/true,
# so binding them to their Python equivalents lets that text be consumed
# as Python literal syntax at module level.
null = None
false = False
true = True


class ItkzBdSpider(BaseCommonSpider):
    """Refresh network-disk info for itkz.net resources already stored in the DB.

    Flow per data id (one "page" == one id loaded from the DB):
    POST the "buy" API for the id; on success POST the matching ".txt"
    download endpoint and parse the disk url / passwords out of its text,
    yielding an ItkzItem for the pipeline.
    """

    name = "ItkzBdSpider"

    DEFAULT_START_PAGE = 1
    IS_RESTART = True
    BATCH = 1
    # Database accessor; created in init_child().
    itkz_db = None

    # Detail page URL template.
    URL_DTL_TEMPLATE = "https://www.itkz.net/goods/{data_id}.html"
    # Resource "buy" API URL template.
    URL_BUY_TEMPLATE = "https://www.itkz.net/api/buy/{data_id}"
    # Resource txt download URL template.
    URL_TXT_TEMPLATE = "https://www.itkz.net/api/down/{data_id}.txt"

    IS_POST = True

    # Auth state: TOKEN is (re)obtained via refresh_token() using ACCOUNT/PASSWORD.
    TOKEN = None
    ACCOUNT = None
    PASSWORD = None
    custom_settings = {
        'ITEM_PIPELINES': {'itkz.spider_ext.pipelines.itkz.ItkzDb': 301},
        'DOWNLOADER_MIDDLEWARES': {
            'itkz.spider_ext.middlewares.m_agent.RandomUserAgentMiddleware': 542,
            'itkz.spider_ext.middlewares.m_proxy.ProxyMiddleware': 544,

            'itkz.spider_ext.middlewares.m_splash.RandomUserAgentMiddleware': 700,
            'itkz.spider_ext.middlewares.m_splash.SplashProxyMiddleware': 701,
            'scrapy_splash.SplashCookiesMiddleware': 723,
            'scrapy_splash.SplashMiddleware': 725,
            'scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware': 810,
        },

        'SPIDER_MIDDLEWARES': {'scrapy_splash.SplashDeduplicateArgsMiddleware': 100},
        'DUPEFILTER_CLASS': 'scrapy_splash.SplashAwareDupeFilter',
        'HTTPCACHE_STORAGE': 'scrapy_splash.SplashAwareFSCacheStorage',

        'SLEEP_TIME': 1,
        'DOWNLOAD_DELAY': 0.1,
        'DOWNLOAD_TIMEOUT': 12,
        'IS_USE_PROXY': False,
        'IS_USE_AGENT': False,
        'ROBOTSTXT_OBEY': False,
        'CONCURRENT_REQUESTS': 1,
    }
    # List of data-id rows to update; loaded in init_child().
    dataIds = None

    def init_child(self, kwargs):
        """Load all known data ids from the DB and size the crawl to match."""
        self.itkz_db = ItkzDb()
        self.dataIds = self.itkz_db.get_all_dataId()
        self.max_page = len(self.dataIds or [])

    def get_header(self):
        """Build the common request headers carrying the bearer token."""
        return {
            'user-agent': token_util.DEFAULT_USER_AGENT,
            'content-type': 'application/json',
            # Fixed: key previously had a stray trailing colon
            # ('accept-language:'), which is not a valid header name.
            'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8',
            'authorization': f'Bearer {self.TOKEN}'
        }

    def refresh_token(self):
        """Refresh the request token using the configured account/password.

        :return: None; on success updates ``self.TOKEN`` and sleeps briefly
            before further requests are made with the new token.
        """
        token: str = token_util.get_token(self.ACCOUNT, self.PASSWORD)
        if token:
            self.TOKEN = token
            self.send_log(f'成功更新token：{token}')
            self.send_log(f"get_header成功，睡眠3秒后再进行请求. token={token}\n")
            # Give the server a moment before reusing the fresh token.
            time.sleep(3)

    def get_url(self, page):
        """Return the "buy" API url for the page-th data id."""
        dataId = self.dataIds[page].get('i_dataId')
        url = self.URL_BUY_TEMPLATE.format(data_id=dataId)
        self.send_log(f'\n\n当前处理链接：{url} ----------------------------------------->')
        return url

    def get_request_mate(self, page):
        """Return request meta carrying the data id for downstream callbacks."""
        dataId = self.dataIds[page].get('i_dataId')
        return {
            'dataId': dataId,
        }

    def get_post_body(self, page):
        """POST body: the APIs expect an (empty) JSON object."""
        return json.dumps({}).encode('utf-8')

    def get_dbs(self):
        """Databases this spider holds open (managed by the base class)."""
        return [self.itkz_db]

    def parse(self, response):
        """Handle the "buy" response; on success request the txt download link.

        Yields a POST request to the txt endpoint, or nothing when the token
        expired (a token refresh is triggered instead).
        """
        try:
            # Parse JSON safely: the previous eval(response.text) would
            # execute arbitrary server-controlled text as Python code.
            r = json.loads(response.text)

            if r.get('message') == "请先登录":
                self.send_log(f'token过期，需要重新获取：{response.text}')
                self.refresh_token()
                return

            dataId = response.meta.get('dataId')
            if r.get('code') == 200:
                url: str = self.URL_TXT_TEMPLATE.format(data_id=dataId)
                self.send_log(f'请求成功，获取到的txt链接：{url}')
                yield scrapy.FormRequest(url=url,
                                         callback=self.parse_dtl,
                                         dont_filter=True,
                                         meta=response.meta,
                                         body=self.get_post_body(0),
                                         method='POST',
                                         headers=self.get_header())
        except Exception as e:
            # Best-effort: log and skip this id rather than abort the crawl.
            self.send_log(f"parse {response.text} {e} \n{response.url}\n")

    def parse_dtl(self, response):
        """Parse the txt response into an ItkzItem with disk url/passwords."""
        try:
            if not response.text or 'http' not in response.text:
                self.send_log(f"parse_dtl 返回内容不符合预期，跳过：{response.text} \n{response.url}\n")
                return
            dataId = response.meta.get('dataId') or ''
            i_bd_txt = self._trim_bd_txt(response.text) or ''
            i_bd_url, i_bd_pw, i_jy_pw = bd_info_util.get_bd_info(i_bd_txt)
            temp_item = ItkzItem()
            temp_item['i_dataId'] = dataId or ''
            temp_item['i_bd_txt'] = i_bd_txt or ''
            temp_item['i_bd_url'] = i_bd_url or ''
            temp_item['i_bd_pw'] = i_bd_pw or ''
            temp_item['i_jy_pw'] = i_jy_pw or ''
            self.send_log(f'解析文本成功：\n{temp_item}')
            yield temp_item
        except Exception as e:
            # Best-effort: log and skip this id rather than abort the crawl.
            self.send_log(f"parse_dtl {response.text} {e} \n{response.url}\n")

    def _trim_bd_txt(self, txt):
        """Drop everything from the '来源网站' marker on, then strip newlines."""
        if txt and '来源网站' in txt:
            txt = txt.split('来源网站')[0]
        return (txt or '').replace('\n', '').replace('\r', '')