# -*- coding:utf8 -*-
import json
import typing
import copy
import os
import traceback
from scrapy import Request
from urllib.parse import urlparse
from squirrel_core.commons.utils.tools import calc_str_md5, upload_file
from squirrel_core.frame.spider_makaka import MakakaSpider
file_path = os.environ.get("FILE_PATH", "/")


class malasong_race(MakakaSpider):
    """Spider that crawls marathon race listings and details from chinaath.com.

    Flow: paginated race-list endpoint (POST) -> per-race detail endpoint
    (POST) -> optional route-map picture download/upload -> result item.
    """

    name = "malasong_race"
    # Paginated race-list endpoint (expects a JSON POST body).
    start_url = "https://api-changzheng.chinaath.com/changzheng-content-center-api/api/homePage/official/searchCompetitionMls"
    # Single-race detail endpoint (expects a JSON POST body).
    detail_url = "https://api-changzheng.chinaath.com/changzheng-content-center-api/api/homePage/official/searchById"
    header = {
        "Content-Type": "application/json"
    }
    # Template request bodies; "pageNo" is overwritten per page in parse().
    post_data = {"provinceId": "", "cityId": "", "districtId": "", "raceName": "",
                 "raceGrade": "", "raceStartTime": "", "pageNo": 1, "pageSize": 100}
    detail_data = {"id": "1000283036", "type": "SS"}

    need_ssdbstore_dup = True
    serialNumber = ""  # task serial number taken from the scheduler payload

    def get_ext_requests_or_urls(self, data=None):
        """Entry point: decode the task payload and issue the first list request.

        ``data`` may arrive as a JSON string or a mapping; only mappings
        (after decoding) trigger a crawl.
        """
        if data and isinstance(data, str):
            data = json.loads(data)
        if data and isinstance(data, typing.MutableMapping):
            self.serialNumber = data.get("serialNumber", "")
            self.upload_procedure({"serialNumber": self.serialNumber, "code": 100, "message": "任务启动成功"})
            yield Request(url=self.start_url, callback=self.parse, dont_filter=True,
                          body=json.dumps(self.post_data), method="POST",
                          errback=self.err_parse, headers=self.header)

    def parse(self, response, **kwargs):
        """Parse one page of the race list; schedule a detail request per race
        and, on the first page only, requests for all remaining pages."""
        page = response.meta.get("page", 1)
        try:
            self.logger.info(f"开始请求第{page}页赛事信息")
            page_flag = response.meta.get("page_flag", True)
            # "or {}" guards against the API answering "data": null, which
            # .get("data", {}) would pass through as None.
            content = json.loads(response.text).get("data") or {}
            for data in content.get("results", []):
                race_id = data.get("raceId", "")
                race_name = data.get("raceName", "")
                detail_data = {"id": race_id, "type": "SS"}
                yield Request(url=self.detail_url, callback=self.parse_detail,
                              body=json.dumps(detail_data), meta={"race_name": race_name},
                              method="POST", errback=self.err_parse, headers=self.header)
            if page_flag:
                page_total = content.get("pageCount", 0)
                # Distinct loop variable: the original reused ``page``, so an
                # exception raised mid-pagination logged the wrong page number.
                for next_page in range(2, page_total + 1):
                    post_data = copy.deepcopy(self.post_data)
                    post_data["pageNo"] = next_page
                    yield Request(url=self.start_url, callback=self.parse, dont_filter=True,
                                  body=json.dumps(post_data), meta={"page_flag": False, "page": next_page},
                                  method="POST", errback=self.err_parse, headers=self.header)
        except Exception:
            self.logger.error(f"请求第{page}页赛事信息出错：{traceback.format_exc()}")

    def parse_detail(self, response):
        """Parse one race's detail; fetch the route-map picture when present,
        otherwise emit the result item directly."""
        race_name = response.meta.get("race_name", "")
        try:
            self.logger.info(f"开始请求【{race_name}】的详情")
            content = json.loads(response.text).get("data", {}).get("ssdetails", {})
            pic_url = content.get("routeMap", "")
            if pic_url:
                yield Request(url=pic_url, callback=self.parse_pic, errback=self.err_parse,
                              headers={"Host": "file.shuzixindong.com"},
                              meta={"base_info": content})
            else:
                yield self._build_result(response, content)
        except Exception:
            # Original message had a duplicated "请求请求".
            self.logger.error(f"请求【{race_name}】的详情时出错：{traceback.format_exc()}")

    def parse_pic(self, response):
        """Save/upload the route-map picture, then emit the result item with
        ``routeMap`` rewritten to the local picture reference."""
        base_info = response.meta.get("base_info", {})
        match_name = base_info.get("name", "")
        pic_txt = self.download_pic(response.url, response.body, match_name)
        if pic_txt:
            base_info["routeMap"] = pic_txt
        yield self._build_result(response, base_info)

    def _build_result(self, response, content):
        """Assemble the dedup-keyed result item for ``content`` (shared by
        parse_detail and parse_pic, which previously duplicated this)."""
        result = self.result_item_assembler(response)
        result['result_data'] = {"serialNumber": self.serialNumber, "webType": self.name_first,
                                 "crawlerType": self.name_second, "data": str(content)}
        result['_dup_str'] = calc_str_md5(str(content))
        return result

    def download_pic(self, pic_url, pic_body, match_id):
        """Write the picture bytes under FILE_PATH/<name_first>/ and upload them.

        ``match_id`` is the race name used to build the file name (kept for
        interface compatibility). Returns a "图片:/..." reference string on
        success, "" on any failure.
        """
        try:
            suffix = os.path.splitext(urlparse(pic_url).path)[1].lower()
            pic_name = f"{match_id}_{calc_str_md5(pic_url)}{suffix}"
            dir_path = os.path.join(file_path, self.name_first)
            # exist_ok avoids the exists()/makedirs() race of the original.
            os.makedirs(dir_path, exist_ok=True)
            save_path = os.path.join(dir_path, pic_name)
            if not os.path.exists(save_path):
                with open(save_path, "wb") as f:
                    f.write(pic_body)
                if upload_file(save_path, f"{self.name_first}/{pic_name}"):
                    self.logger.info(f"{save_path}上传成功")
                else:
                    self.logger.info(f"{save_path}上传失败")
            return f"图片:/{self.name_first}/{pic_name}"
        except Exception:
            self.logger.error(f"下载{pic_url}失败：{traceback.format_exc()}")
            return ""

    def err_parse(self, failure):
        """Errback for failed requests.

        The original logged ``traceback.format_exc()``, which is always
        "NoneType: None" here because no exception is being handled inside a
        Twisted errback — the error lives on the Failure object instead.
        """
        self.logger.warning(f"请求失败：{failure.request.url},错误原因:{failure.value!r}")
