# -*- coding:utf8 -*-
import copy
import math
import typing
import json
import traceback
import datetime
import os
import pyssdb
from urllib.parse import urlparse
from urllib.request import urlretrieve
from lxml import etree
from scrapy import Request
from squirrel_core.commons.utils.get_config import get_config
from squirrel_core.commons.utils.tools import calc_str_md5, upload_file
from squirrel_core.frame.spider_makaka import MakakaSpider
# Base directory for downloaded race images; overridable via the FILE_PATH env var.
file_path = os.environ.get("FILE_PATH", "/")
# NOTE(review): these two module-level globals look unused — the spider class
# below defines identically named class attributes; presumably leftovers.
need_ssdbstore_dup = True
serialNumber = ""


class shuzixindong_race(MakakaSpider):
    """Crawl race listings and details from the shuzixindong (chinaath) app API.

    Flow:
      1. ``get_ext_requests_or_urls`` pops a login token from SSDB and posts
         the first paged race-list query.
      2. ``parse`` walks one list page, issuing a detail request per race and,
         on the first page, fanning out requests for the remaining pages.
      3. ``race_detail_parse`` -> ``race_detail_content_parse`` fetch the
         detail JSON plus the HTML content body, download/upload embedded
         images as a side effect, and yield the assembled result item.
    """

    name = "shuzixindong_race"
    race_url = "https://api-changzheng.chinaath.com/changzheng-race-proxy-api/api/race/zone/query/pageForApp"
    # Static headers imitating the official Android client build.
    header = {
        "machineCode": "6bd839d8b9f7f76d",
        "osId": "1002",
        "device": "pad/12 HUAWEI MGA-AL00",
        "versionNumber": "519",
        "versionCode": "5.54.0",
        "terminalType": "1",
        "Content-Type": "application/x-www-form-urlencoded",
        "Host": "api-changzheng.chinaath.com",
    }
    need_ssdbstore_dup = True
    serialNumber = ""
    base_config = get_config(sections="base")
    ssdb_host = base_config.get('ssdb_host')
    ssdb_port = base_config.get('ssdb_port')

    def get_ext_requests_or_urls(self, data=None):
        """Entry point: parse the task payload, fetch the SSDB login token and
        yield the first race-list request (or shut the spider down when the
        account is not logged in).

        :param data: task payload — either a JSON string or a mapping carrying
            ``serialNumber``.
        """
        if data and isinstance(data, str):
            data = json.loads(data)
        if data and isinstance(data, typing.MutableMapping):
            self.serialNumber = data.get("serialNumber", "")
            self.upload_procedure({"serialNumber": self.serialNumber, "code": 100, "message": "任务启动成功"})
            client = pyssdb.Client(host=self.ssdb_host, port=self.ssdb_port)
            try:
                token = client.qfront("shuzixindong_cookie")
            # Narrowed from a bare ``except:`` (which would also swallow
            # SystemExit/KeyboardInterrupt); any SSDB failure means "no token".
            except Exception:
                token = ""
            finally:
                client.disconnect()
            if token:
                header = copy.deepcopy(self.header)
                header.update({"token": token.decode('utf-8'), "Content-Type": "application/json; charset=UTF-8"})
                start_date, end_date = self.get_date()
                race_data = {"nodeLevel": 1, "pageNo": 1, "pageQueryFlag": True, "pageSize": 1000, "parentCode": 10,
                             "signUpStartTimeStart": start_date, "signUpStartTimeEnd": end_date}
                yield Request(url=self.race_url, callback=self.parse, method="POST", headers=header,
                              errback=self.err_parse,
                              body=json.dumps(race_data), meta={"header": header}, dont_filter=True)
            else:
                self.logger.info("数字心动未登陆")
                self.upload_procedure({"serialNumber": self.serialNumber, "code": 101, "message": "数字心动未登陆，请先进行登陆"})
                self.close_after_idle = True
                self.force_to_close_spider = True

    def parse(self, response, **kwargs):
        """Parse one page of the race list: yield one detail request per race
        and, on the first page only, requests for the remaining pages."""
        try:
            self.logger.info("开始解析比赛信息")
            page_flag = response.meta.get("page_flag", False)
            header = response.meta.get("header", {})
            data = json.loads(response.text)
            race_data_list = response.meta.get("race_data_list", []) if page_flag else []
            race_list = data.get("data", {}).get("results", [])
            for race in race_list:
                race_code = race.get("raceInfoResDTO", {}).get("raceCode", "")
                yield Request(url="https://api-changzheng.chinaath.com/changzheng-race-proxy-api/api/race/proxy/query/h5/new/detail",
                              callback=self.race_detail_parse, method="POST", headers=header,
                              body=json.dumps({"raceId": race_code, "channel": 0}), errback=self.err_parse,
                              meta={"header": header})
            if not page_flag:
                total = data.get("data", {}).get("totalCount", 0)
                if total > 1000:
                    # 1000 items per page; pages 2..N are requested here.
                    page_count = math.ceil(total / 1000)
                    page_list = list(range(2, page_count + 1))
                    for page in page_list:
                        # NOTE(review): unlike the first page, this payload omits the
                        # signUpStartTimeStart/End filters — confirm that is intended.
                        race_data = {"nodeLevel": 1, "pageNo": page, "pageQueryFlag": True, "pageSize": 1000,
                                     "parentCode": 10}
                        yield Request(url=self.race_url, callback=self.parse, method="POST", headers=header,
                                      body=json.dumps(race_data), errback=self.err_parse, dont_filter=True,
                                      meta={"header": header, "page_list": page_list, "race_data_list": race_data_list, "page_flag": True})
        except Exception:
            self.logger.error(f"解析比赛信息时出错：{traceback.format_exc()}")

    def race_detail_parse(self, response, **kwargs):
        """Parse the race detail JSON and chase the HTML content endpoint,
        carrying the detail payload along in request meta."""
        try:
            self.logger.info("开始解析比赛详细信息")
            header = response.meta.get("header", {})
            data = json.loads(response.text)
            detail_data = data.get("data", {})
            race_id = detail_data.get("raceId", "")
            url = f"https://api-changzheng.chinaath.com/changzheng-race-center-api/api/race/query/h5/new/content?raceId={race_id}"
            yield Request(
                url=url, callback=self.race_detail_content_parse, headers=header, dont_filter=True,
                errback=self.err_parse, meta={"race_id": race_id, "detail_data": detail_data})

        except Exception:
            self.logger.error(f"解析比赛详细信息时出错：{traceback.format_exc()}")

    def race_detail_content_parse(self, response, **kwargs):
        """Extract the race's HTML content (paragraph text + images), merge it
        with the detail JSON and yield the assembled result item."""
        try:
            content_list = []
            self.logger.info("开始解析比赛详细信息")
            detail_data = response.meta.get("detail_data", {})
            race_id = response.meta.get("race_id", "")
            detail_img = detail_data.get("raceBaseInfo", {}).get("raceDetailImg", "")
            if detail_img:
                # Replace the relative image path with our uploaded-file marker.
                detail_img_url = f"https://img.shuzixindong.com{detail_img}"
                pic_txt = self.download_pic(detail_img_url, race_id)
                detail_data["raceBaseInfo"]["raceDetailImg"] = pic_txt
            data = json.loads(response.text)
            content_str = data.get("data", {}).get("content", "")
            if content_str:
                tree = etree.HTML(content_str)
                d_list = tree.xpath("//p")
                for d in d_list:
                    if d.xpath(".//img"):
                        pic_list = d.xpath(".//img/@src")
                        for pic_url in pic_list:
                            if pic_url:
                                pic_txt = self.download_pic(pic_url, race_id)
                                content_list.append(pic_txt)
                    else:
                        # BUGFIX: the guard used the absolute XPath "//text()", which
                        # collects text from the WHOLE document, so every text-less
                        # <p> was appended whenever the page had any text at all.
                        # ".//text()" inspects only the current paragraph.
                        c_str = "".join(d.xpath(".//text()")).replace("\n", "").replace(' ', "")
                        if c_str:
                            content_list.append(d.xpath("string()"))
            r_data = {"api_data": detail_data, "html_data": content_list}
            result_dict = {"serialNumber": self.serialNumber, "webType": self.name_first,
                           "crawlerType": self.name_second, "data": str(r_data)}
            result = self.result_item_assembler(response)
            result['result_data'] = result_dict
            # Dedup key: md5 of the stringified payload.
            result['_dup_str'] = calc_str_md5(str(r_data))
            yield result
        except Exception:
            self.logger.error(f"解析比赛详细信息时出错：{traceback.format_exc()}")

    def download_pic(self, pic_url, match_id):
        """Download one image under FILE_PATH/<name_first>/ and upload it.

        :param pic_url: absolute image URL.
        :param match_id: race id used to namespace the local file name.
        :return: the placeholder string stored in the result ("图片:/..."),
            or "" when download/upload raised.
        """
        try:
            path = urlparse(pic_url).path
            suffix = os.path.splitext(path)[1].lower()
            pic_md5 = calc_str_md5(pic_url)
            pic_name = f"{match_id}_{pic_md5}{suffix}"
            dir_path = os.path.join(file_path, self.name_first)
            if not os.path.exists(dir_path):
                os.makedirs(dir_path)
            save_path = os.path.join(dir_path, pic_name)
            # Skip images already fetched (and presumably uploaded) earlier.
            if not os.path.exists(save_path):
                urlretrieve(pic_url, save_path)
                upload_flag = upload_file(save_path, f"{self.name_first}/{pic_name}")
                if upload_flag:
                    self.logger.info(f"{save_path}上传成功")
                else:
                    self.logger.info(f"{save_path}上传失败")
            return f"图片:/{self.name_first}/{pic_name}"
        except Exception:
            self.logger.error(f"下载{pic_url}失败：{traceback.format_exc()}")
            return ""

    def get_date(self):
        """Return (start, end) of the current calendar year as millisecond
        epoch timestamps (naive local time — presumably what the API expects;
        TODO confirm the server's timezone assumption)."""
        year = datetime.datetime.now().year
        start_date = datetime.datetime(year, 1, 1).timestamp()
        end_date = datetime.datetime(year, 12, 31, 23, 59, 59).timestamp()
        return int(start_date) * 1000, int(end_date) * 1000

    def err_parse(self, failure):
        """Errback for failed requests.

        BUGFIX: the original logged ``traceback.format_exc()``, which always
        yields "NoneType: None" here because no exception is being handled in
        this frame; the twisted Failure object carries the real error.
        """
        self.logger.warning(f"请求失败：{failure.request.url},错误原因:{repr(failure.value)}")