# -*- coding:utf8 -*-
import json
import traceback
import os
import typing
import xml.etree.ElementTree as ET
from urllib.parse import urlparse
from scrapy import Request
from squirrel_core.commons.utils.tools import calc_str_md5, upload_file
from squirrel_core.frame.spider_makaka import MakakaSpider

# Root directory for files saved by the spider (certificate PDFs); can be
# overridden through the FILE_PATH environment variable, defaults to "/".
file_path = os.environ.get("FILE_PATH", "/")


class tnf100_score(MakakaSpider):
    """Crawl an athlete's race result from livetrail-style timing sites.

    Flow: build the athlete search URL from the task payload -> parse the
    XML answer sheet (per-checkpoint splits, overall time, paces) -> for
    livetrail.net events additionally download the finisher-certificate PDF,
    upload it, and attach its remote path to the result item.
    """
    name = "tnf100_score"
    serialNumber = ""
    home_url = "https://{}/coureur.php?rech={}"
    rank_url = "https://timerbackend.geexek.com/initNewList.do"
    base_score_url = "https://timerbackend.geexek.com/rankingBoard.do"
    detail_url = "https://timerbackend.geexek.com/getScoreInfo.do"
    extend_url = "https://timerbackend.geexek.com/getScoreInfo_extend.do"

    # NOTE(review): class-level dict mutated per task ("Host" is assigned in
    # get_ext_requests_or_urls) — fine while one spider instance handles one
    # task, but shared state if that ever changes.
    header = {
        # "Host": "tnf100beijing.livetrail.run",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/138.0.0.0 Safari/537.36",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7"
    }
    # race name -> competition id on the geexek timer backend.
    cmpt_id_dict = {
        "2025若尔盖国家公园天边小路越野赛": "29469",
        "蒙泰·2025鄂尔多斯马拉松": "29507",
        "2025凯乐石东北100松花湖跑山赛": "29528",
        "2025九月凯乐石FUGA训练赛南京站": "29613",
        "2025“赛动黔景”贵州·毕节赫章阿西里西登山、越野双日赛": "29700",
        "2025麒麟小勇士障碍挑战赛·秋季赛": "29779"
    }

    @staticmethod
    def _format_pace(minutes_per_km):
        """Render a fractional minutes-per-km value as an ``M:S`` string.

        Keeps the historical rounding: the two decimal digits are scaled to
        seconds and truncated (e.g. 7.75 -> "7:45").
        NOTE(review): seconds are not zero-padded ("7:5" instead of "7:05");
        kept as-is for output compatibility with downstream consumers.
        """
        text = format(minutes_per_km, ".2f")
        whole, frac = text.split(".")
        return f'{whole}:{int(int(frac)/100 *60)}'

    def get_ext_requests_or_urls(self, data=None):
        """Framework entry point receiving the task payload.

        ``data`` may arrive as a JSON string or a mapping; its
        ``spider_config`` must contain at least one of ``user_name`` /
        ``race_no``. Yields the initial athlete-search request, or reports
        code 101 and schedules the spider to close.
        """
        if data and isinstance(data, str):
            data = json.loads(data)
        if data and isinstance(data, typing.MutableMapping):
            self.serialNumber = data.get("serialNumber", "")
            # Hoisted: the original re-fetched "spider_config" for every field.
            spider_config = data.get("spider_config", {})
            race_id = spider_config.get("race_id", "")
            user_id = spider_config.get("user_id", "")
            race_no = spider_config.get("race_no", "")
            user_name = spider_config.get("user_name", "")
            race_name = spider_config.get("race_name", "").replace(" ", "")
            home_url = spider_config.get("url", "")
            if not user_name and not race_no:
                self.logger.info("user_name和race_no必须存在")
                self.upload_procedure({"serialNumber": self.serialNumber, "code": 101, "message": "user_name和race_no必须存在"})
                self.close_after_idle = True
                self.force_to_close_spider = True
            else:
                self.upload_procedure({"serialNumber": self.serialNumber, "code": 100, "message": "任务启动成功"})
                domain = urlparse(home_url).netloc
                # Bib number takes priority over athlete name as the search keyword.
                keyword = race_no or user_name
                if domain == "livetrail.net":
                    # livetrail.net task URLs already embed the event path.
                    url = f"{home_url}coureur.php?rech={keyword}"
                else:
                    url = self.home_url.format(domain, keyword)
                self.header["Host"] = domain
                yield Request(url=url, callback=self.parse, dont_filter=True, errback=self.err_parse, headers=self.header,
                              meta={"race_id": race_id, "race_no": race_no, "user_id": user_id, "race_name": race_name,
                                    "user_name": user_name, "domain": domain})

    def parse(self, response, **kwargs):
        """Parse the XML result sheet for one athlete.

        Builds per-checkpoint splits (time, segment pace) and the overall
        result, then either requests the certificate PDF (livetrail.net) or
        emits the result item directly. Any parsing error is logged and
        swallowed so the spider keeps running.
        """
        race_id = response.meta.get("race_id", "")
        user_id = response.meta.get("user_id", "")
        race_no = response.meta.get("race_no", "")
        race_name = response.meta.get("race_name", "")
        user_name = response.meta.get("user_name", "")
        domain = response.meta.get("domain", "")
        try:
            self.logger.info(f"开始查找{race_no}的成绩:{response.url}")
            root = ET.fromstring(response.text)
            # <fiche><identite cat="..."> carries the competition group/category.
            # Explicit None check instead of the original bare "except:".
            identite = root.find('.//fiche/identite')
            group_name = identite.get("cat") if identite is not None else ""
            score_data = {"race_name": race_name, "itemName": group_name,
                          "name": user_name, "raceNo": race_no,
                          "pace": "",
                          "distance": group_name, "shotScore": "",
                          "score": ""}
            # <pts><pt .../> lists the course checkpoints.
            pts = root.find('.//pts')
            list_element = pts.findall('pt') if pts is not None else []

            result = self.result_item_assembler(response)
            if list_element:
                locations = [{"cp_name": point.get('n'), "start_time": "", "score": "",
                              "distance": point.get('km'), "pace": "", 'cp_id': point.get('idpt')}
                             for point in list_element]
                # <pass><e .../> lists the athlete's actual checkpoint passages.
                pass_element = root.find('.//pass').findall('e')
                for loc_idx, location in enumerate(locations):
                    for pass_index, pass_data in enumerate(pass_element):
                        # Match passage to checkpoint by checkpoint id.
                        if pass_data.get("idpt") != location.get("cp_id"):
                            continue
                        # "hd" preferred over "ha" — presumably departure vs
                        # arrival time; confirm against the livetrail schema.
                        location["start_time"] = pass_data.get("hd") or pass_data.get("ha")
                        stop_time = pass_data.get("tps")
                        location["score"] = stop_time
                        hours, minutes, seconds = map(int, stop_time.split(':'))
                        if pass_index == 0:
                            prev_hours = 0
                        else:
                            _h, _m, _s = map(int, pass_element[pass_index - 1].get("tps").split(':'))
                            prev_hours = (_h * 60 + _m + _s / 60) / 60
                        # Elapsed time on this segment, in hours.
                        elapsed_hours = (hours * 60 + minutes + seconds / 60) / 60 - prev_hours
                        if elapsed_hours:
                            distance = float(location["distance"])
                            # Fix: the original used locations[idx-1], which for
                            # the FIRST checkpoint wrapped to locations[-1] (the
                            # finish) and yielded a negative segment length and a
                            # garbage pace. The first segment starts from km 0.
                            pre_distance = 0.0 if loc_idx == 0 else float(locations[loc_idx - 1]["distance"])
                            segment_km = distance - pre_distance
                            # min/km = 60 / (km/h)
                            location["pace"] = self._format_pace(60 / (segment_km / elapsed_hours))
                        else:
                            location["pace"] = ""
                        break
                locations[-1]["cp_name"] = "FINISH"
                score_data["distance"] = locations[-1]["distance"]
                score_data["sectionScore"] = locations
                if locations[-1]["score"]:
                    tps = ""
                    tpsr = ""
                    overall_pace = None
                    distance = locations[-1]["distance"]
                    cp_id = locations[-1]["cp_id"]
                    for pass_data in pass_element:
                        if pass_data.get("idpt") == cp_id:
                            tps = pass_data.get("tps")
                            hours, minutes, seconds = map(int, tps.split(':'))
                            total_hours = (hours * 60 + minutes + seconds / 60) / 60
                            overall_pace = 60 / (float(distance) / total_hours)
                            # "tpsr" presumably the net/real time — confirm.
                            # (The original also re-parsed tps into an unused
                            # h2 here; that dead copy-paste code is removed.)
                            tpsr = pass_data.get("tpsr")
                            break
                    if overall_pace is not None:
                        score_data["pace"] = self._format_pace(overall_pace)
                    score_data["shotScore"] = tpsr
                    score_data["score"] = tps
                    score_data["distance"] = distance
                if domain == "livetrail.net":
                    img_url = f"https://livetrail.net/live/dashudao/coureurDiplome.php?dossard={race_no}"
                    yield Request(url=img_url, callback=self.parse_pic, errback=self.err_parse, dont_filter=True,
                                  meta={"race_id": race_id, "user_name": user_name, "user_id": user_id,
                                        "score_data": score_data, "race_no": race_no, "download_timeout": 120})
                else:
                    result['result_data'] = {"serialNumber": self.serialNumber, "webType": self.name_first,
                                             "crawlerType": self.name_second, "data": str(score_data)}
                    yield result
            else:
                result['result_data'] = {"serialNumber": self.serialNumber, "webType": self.name_first,
                                         "crawlerType": self.name_second, "data": str({"msg": "未查到相关成绩信息"})}
                yield result
        except Exception:
            self.logger.info(f"查找{race_no}的成绩时出错{response.url}：{traceback.format_exc()}")

    def parse_pic(self, response):
        """Save the finisher-certificate PDF, upload it, and emit the final
        result item with the remote certificate path attached."""
        race_id = response.meta.get("race_id", "")
        user_id = response.meta.get("user_id", "")
        user_name = response.meta.get("user_name", "")
        race_no = response.meta.get("race_no", "")
        score_data = response.meta.get("score_data", {})
        try:
            self.logger.info(f"开始下载{user_name}的证书：{response.url}")
            pic_md5 = calc_str_md5(response.url)
            pic_name = f"{race_no}_{pic_md5}.pdf"
            dir_path = os.path.join(file_path, "picture", self.name_first)
            # exist_ok avoids the check-then-create race of the original.
            os.makedirs(dir_path, exist_ok=True)
            save_path = os.path.join(dir_path, pic_name)
            # NOTE(review): when the PDF already exists on disk nothing is
            # yielded at all, so a re-run silently produces no result item —
            # confirm this dedupe is intentional before changing it.
            if not os.path.exists(save_path):
                with open(save_path, "wb") as f:
                    f.write(response.body)
                upload_path = f"flow/{race_id}/{user_id}/pic/{pic_name}"
                upload_flag = upload_file(save_path, upload_path)
                if upload_flag:
                    self.logger.info(f"{save_path}上传成功：{upload_path}")
                else:
                    self.logger.info(f"{save_path}上传失败：{upload_path}")
                score_data["certImg"] = upload_path
                result = self.result_item_assembler(response)
                result['result_data'] = {"serialNumber": self.serialNumber, "webType": self.name_first,
                                         "crawlerType": self.name_second, "data": str(score_data)}
                yield result
        except Exception:
            self.logger.info(f"下载{user_name}证书时出错{response.url}：{traceback.format_exc()}")

    def err_parse(self, failure):
        """Errback: log the failure and retry the request up to 3 times with
        a fresh proxy (``proxy_change`` flag in meta)."""
        request = failure.request
        change = request.meta
        r_time = change.get("r_time", 1)
        # Fix: traceback.format_exc() always logged "NoneType: None" here —
        # an errback runs outside any except block, so there is no current
        # exception. The actual error lives in failure.value.
        self.logger.warning(f"请求失败：{failure.request.url},错误原因:{failure.value!r},准备第{r_time}次重试")
        if r_time <= 3:
            r_time += 1
            try:
                change.update({"proxy_change": True, "r_time": r_time})
                yield request.replace(meta=change)
            except Exception:
                pass

