# -*- coding:utf8 -*-
import json
import traceback
import os
import typing
from urllib.request import urlretrieve
from scrapy import Request
from squirrel_core.commons.utils.tools import calc_str_md5, upload_file
from squirrel_core.frame.spider_makaka import MakakaSpider

# Root directory for downloaded assets; falls back to "/" when FILE_PATH is unset.
file_path = os.getenv("FILE_PATH", "/")


class peisu_race(MakakaSpider):
    """Spider for the iyiwujiu mini-program race API.

    Walks the paginated race listing, requests each race's detail endpoint,
    downloads the cover image (uploading it to remote storage), and yields one
    deduplicated result item per race.
    """

    name = "peisu_race"
    # Paginated listing endpoint; {} is the 1-based page number (20 per page).
    url = "https://api.iyiwujiu.com/api/v1/match/races?keyword=&category=&year=&month=&day=&race_region_id=&cert=&sponsor=&status=&page={}&per_page=20"
    # Detail endpoint; {} is the race id taken from the listing response.
    race_url = "https://api.iyiwujiu.com/api/v1/match/races/{}"
    header = {
        "Host": "api.iyiwujiu.com",
        "authorization": "Bearer ",
        "charset": "utf-8",
        "User-Agent": "Mozilla/5.0 (Linux; Android 12; SM-F926U Build/V417IR; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/101.0.4951.61 Safari/537.36 MMWEBID/346 MicroMessenger/8.0.47.2560(0x28002F36) WeChat/arm64 Weixin Android Tablet NetType/WIFI Language/zh_CN ABI/arm64 MiniProgramEnv/android",
        "content-type": "application/x-www-form-urlencoded",
        "Accept-Encoding": "gzip,compress,br,deflate",
        "accept": "*/*",
        "Referer": "https://servicewechat.com/wxb49e2c634875cae6/28/page-frame.html"
    }
    # Task serial number taken from the launch payload; echoed back in results.
    serialNumber = ""
    need_ssdbstore_dup = True

    def get_ext_requests_or_urls(self, data=None):
        """Entry point: record the task serial number (if provided) and seed
        the crawl with page 1 of the race listing.

        :param data: optional launch payload, either a JSON string or a
                     mapping containing ``serialNumber``.
        """
        if data and isinstance(data, str):
            data = json.loads(data)
        if data and isinstance(data, typing.MutableMapping):
            self.serialNumber = data.get("serialNumber", "")
            self.upload_procedure({"serialNumber": self.serialNumber, "code": 100, "message": "任务启动成功"})
        yield Request(url=self.url.format(1), callback=self.parse, dont_filter=True, errback=self.err_parse, headers=self.header)

    def parse(self, response, **kwargs):
        """Parse one listing page: schedule a detail request per race, and on
        page 1 fan out requests for all remaining pages."""
        page_no = response.meta.get("page_no", 1)
        try:
            self.logger.info(f"开始解析第{page_no}页的赛事")
            content = json.loads(response.text)
            race_list = content.get("races", [])
            for race in race_list:
                race_id = race.get("id", "")
                yield Request(url=self.race_url.format(race_id), callback=self.parse_race,
                              errback=self.err_parse,
                              headers=self.header)
            if page_no == 1:
                total_pages = content.get("total_pages", 1)
                # BUGFIX: range end must be total_pages + 1, otherwise the
                # last page was never requested (off-by-one).
                for page in range(2, total_pages + 1):
                    yield Request(url=self.url.format(page), callback=self.parse, dont_filter=True, errback=self.err_parse,
                                  headers=self.header, meta={"page_no": page})
        except Exception:
            # Log at error level (consistent with parse_race) so failed pages
            # are visible in monitoring.
            self.logger.error(f"解析第{page_no}页的赛事时出错{response.url}：{traceback.format_exc()}")

    def parse_race(self, response):
        """Parse one race detail response and yield a dedup-keyed result item.

        Extracts name/date/location/groups, downloads the cover image when
        present, and stamps the item with an MD5 of the payload for the
        ssdb-based duplicate filter.
        """
        try:
            self.logger.info("开始解析赛事信息")
            content = json.loads(response.text)
            race_id = content.get("id", "")
            race_name = content.get("name", "")
            race_date = content.get("race_date", "")
            race_address = content.get("location", "")
            # "gps" holds the race groups/categories as a list of dicts.
            # BUGFIX: default to [] (was {}): iterating a dict yields string
            # keys, on which .get() would raise AttributeError.
            gps = content.get("gps", [])
            race_group = [g.get("name", "") for g in gps]
            cover = content.get("cover", "")
            logo = self.download_pic(cover, race_id) if cover else ""
            race_data = {"race_name": race_name, "race_date": race_date, "race_address": race_address,
                         "race_group": race_group, "logo": logo}
            result_dict = {"serialNumber": self.serialNumber, "webType": self.name_first,
                           "crawlerType": self.name_second, "data": str(race_data)}
            result = self.result_item_assembler(response)
            result['result_data'] = result_dict
            # Dedup key: identical payloads hash identically and are dropped.
            result['_dup_str'] = calc_str_md5(str(race_data))
            yield result
        except Exception:
            self.logger.error(f"解析赛事信息时出错：{traceback.format_exc()}")

    def download_pic(self, pic_url, match_id):
        """Download a cover image to <file_path>/<name_first>/ and upload it
        to remote storage.

        :param pic_url: absolute URL of the image.
        :param match_id: race id, used to build a unique local filename.
        :return: the logical storage path string, or "" on failure.
        """
        try:
            self.logger.info(f"开始下载图片：{pic_url}")
            pic_md5 = calc_str_md5(pic_url)
            pic_name = f"{match_id}_{pic_md5}.jpg"
            dir_path = os.path.join(file_path, self.name_first)
            # exist_ok avoids the check-then-create race of the old
            # os.path.exists() guard.
            os.makedirs(dir_path, exist_ok=True)
            save_path = os.path.join(dir_path, pic_name)
            if not os.path.exists(save_path):
                urlretrieve(pic_url, save_path)
                upload_flag = upload_file(save_path, f"{self.name_first}/{pic_name}")
                if upload_flag:
                    self.logger.info(f"{save_path}上传成功")
                else:
                    self.logger.info(f"{save_path}上传失败")
            return f"图片:/{self.name_first}/{pic_name}"
        except Exception:
            self.logger.error(f"下载{pic_url}失败：{traceback.format_exc()}")
            return ""

    def err_parse(self, failure):
        """Errback for failed requests: log the URL and the wrapped error.

        BUGFIX: traceback.format_exc() always produced "NoneType: None" here —
        an errback runs outside any except block, so there is no active
        exception. The real error lives on the Twisted Failure object.
        """
        self.logger.warning(f"请求失败：{failure.request.url},错误原因:{repr(failure.value)}")
