# -*- coding:utf8 -*-
import json
import traceback
import os
import re
import typing
import base64
import requests
from scrapy import Request
from ..signup.sign_chrome import SignChrome
from squirrel_core.commons.utils.tools import calc_str_md5, upload_file
from squirrel_core.frame.spider_makaka import MakakaSpider

# Root directory for downloaded artifacts (certificate images); defaults to "/"
# when the FILE_PATH environment variable is unset.
file_path = os.getenv("FILE_PATH", "/")


class irunner_score(MakakaSpider):
    """Spider that fetches a runner's race score (and certificate image) from
    m.irunner.mobi, uploads the certificate to object storage and emits the
    assembled result item.

    NOTE(review): the lowercase class name is kept deliberately — renaming it
    would break the project's spider registration.
    """

    name = "irunner_score"
    serialNumber = ""  # task serial number, filled from the incoming payload

    def get_ext_requests_or_urls(self, data=None):
        """Parse the task payload and yield the initial score-query request.

        ``data`` may arrive as a JSON string or a mapping.  At least one of
        ``url`` / ``card_num`` must be present in ``spider_config``; otherwise
        the task is reported as failed (code 101) and the spider is closed.
        """
        if data and isinstance(data, str):
            data = json.loads(data)
        if data and isinstance(data, typing.MutableMapping):
            self.serialNumber = data.get("serialNumber", "")
            config = data.get("spider_config", {})  # hoisted: was re-fetched per key
            race_id = config.get("race_id", "")
            user_id = config.get("user_id", "")
            race_item = config.get("race_item", "")
            user_name = config.get("user_name", "")
            race_name = config.get("race_name", "").replace(" ", "")
            home_url = config.get("url", "")
            card_num = config.get("card_num", "")
            if not home_url and not card_num:
                self.logger.info("url和card_num必须存在")
                self.upload_procedure({"serialNumber": self.serialNumber, "code": 101, "message": "url和card_num必须存在"})
                self.close_after_idle = True
                self.force_to_close_spider = True
            else:
                self.upload_procedure({"serialNumber": self.serialNumber, "code": 100, "message": "任务启动成功"})
                # Two URL formats are supported: newer links carry ``match_eng=...#``
                # in the query string, older ones embed the match code as a
                # ``matchs/<code>/`` path segment.
                match_eng = "".join(re.findall(r"match_eng=(.*)?#", home_url)).strip()
                if match_eng:
                    url = f"https://m.irunner.mobi/runner-master/runner-score?identity={card_num}&match_eng={match_eng}"
                else:
                    match_eng = "".join(re.findall(r"matchs/(.*?)/", home_url)).strip()
                    url = f"https://m.irunner.mobi/matchs/{match_eng}/pannel/score-infos?card_number={card_num}"
                # When neither pattern matches, no request is made (kept as-is
                # for compatibility with the original behaviour).
                if match_eng:
                    yield Request(url=url, callback=self.parse, dont_filter=True, errback=self.err_parse,
                                  meta={"race_id": race_id, "user_id": user_id, "user_name": user_name, "card_num": card_num,
                                        "race_name": race_name, "home_url": home_url, "race_item": race_item})

    def parse(self, response, **kwargs):
        """Parse the score response (both API shapes) and emit result items.

        * ``.../pannel/score-infos`` responses carry ``info`` plus a
          certificate URL that is downloaded via :meth:`parse_pic`.
        * ``runner-score`` responses carry ``score_info``; the certificate is
          rendered with a headless browser (``SignChrome.get_pic3``), retried
          up to 3 times with a fresh proxy after each failure.
        """
        meta = response.meta
        race_id = meta.get("race_id", "")
        user_id = meta.get("user_id", "")
        user_name = meta.get("user_name", "")
        race_name = meta.get("race_name", "")
        home_url = meta.get("home_url", "")
        card_num = meta.get("card_num", "")
        race_item = meta.get("race_item", "")
        proxy_url = response.request.meta.get("proxy", "")
        proxy_pre = response.request.meta.get("proxy_pre", "")
        try:
            self.logger.info(f"开始解析{user_name}的基本成绩:{response.url}")
            if "score-infos" in response.url:
                data = json.loads(response.text).get("info", {})
                if isinstance(data, str):
                    # The API returns a bare string message when no score exists.
                    result_dict = {"serialNumber": self.serialNumber, "webType": self.name_first,
                                   "crawlerType": self.name_second, "data": str({"msg": "未查到相关成绩"})}
                    result = self.result_item_assembler(response)
                    result['result_data'] = result_dict
                    yield result
                else:
                    score_data = {"race_name": race_name, "itemName": data.get("course_name", race_item), "index": data.get("gun_total_rank", ""),
                                  "name": data.get("name", ""), "raceNo": data.get("bib_no", ""),
                                  "distance": "", "shotScore": data.get("result_gun", ""), "score_status": "",
                                  "score": data.get("result_net", ""), "sectionScore": [], "certImg": ""}
                    cret_url = data.get("digital_certificate", "")
                    if cret_url:
                        yield Request(url=cret_url, callback=self.parse_pic, errback=self.err_parse, dont_filter=True,
                                      meta={"race_id": race_id, "user_id": user_id, "race_no": data.get("bib_no", ""),
                                            "score_data": score_data})
                    else:
                        # Bug fix: Request() raises ValueError on an empty URL,
                        # which the outer except swallowed and the score was
                        # lost.  Emit it without a certificate instead.
                        result = self.result_item_assembler(response)
                        result['result_data'] = {"serialNumber": self.serialNumber, "webType": self.name_first,
                                                 "crawlerType": self.name_second, "data": str(score_data)}
                        yield result
            else:
                data = json.loads(response.text).get("score_info", {})
                if data:
                    race_code = data.get("bib_no", "")
                    score_data = {"race_name": race_name, "itemName": data.get("course_name", ""), "index": "", "name": data.get("name", ""), "raceNo": race_code,
                                  "distance": "", "shotScore": data.get("result_gun", ""), "score_status": "",
                                  "score": data.get("result_net", ""), "sectionScore": [], "certImg": ""}
                    attempt = 0
                    while attempt < 3:
                        try:
                            dir_path = os.path.join(file_path, "picture", self.name_first)
                            os.makedirs(dir_path, exist_ok=True)
                            img_base64 = SignChrome().get_pic3(home_url, card_num, proxy_url, proxy_pre)
                            image_data = base64.b64decode(img_base64.split(",", 1)[1])
                            pic_name = f'{calc_str_md5(f"{race_code}_{race_id}")}.jpg'
                            save_path = os.path.join(dir_path, pic_name)
                            # Bug fix: the original only uploaded/yielded when the
                            # file did not exist yet and never advanced the loop
                            # counter on that path, spinning forever on a cached
                            # image.  Now: write only when missing, but always
                            # upload and emit the result.
                            if not os.path.exists(save_path):
                                with open(save_path, "wb") as f:
                                    f.write(image_data)
                            upload_path = f"flow/{race_id}/{user_id}/pic/{pic_name}"
                            upload_flag = upload_file(save_path, upload_path)
                            if upload_flag:
                                self.logger.info(f"{save_path}上传成功：{upload_path}")
                            else:
                                self.logger.info(f"{save_path}上传失败：{upload_path}")
                            score_data["certImg"] = upload_path
                            result_dict = {"serialNumber": self.serialNumber, "webType": self.name_first,
                                           "crawlerType": self.name_second, "data": str(score_data)}
                            result = self.result_item_assembler(response)
                            result['result_data'] = result_dict
                            yield result
                            break
                        except Exception:
                            attempt += 1
                            # Rotate the proxy before the next rendering attempt.
                            proxy_url = self.get_proxy() if self.use_proxy else ""
                    else:
                        # All 3 attempts failed: still report the score, just
                        # without a certificate image.
                        result_dict = {"serialNumber": self.serialNumber, "webType": self.name_first,
                                       "crawlerType": self.name_second, "data": str(score_data)}
                        result = self.result_item_assembler(response)
                        result['result_data'] = result_dict
                        yield result
                else:
                    result = self.result_item_assembler(response)
                    result['result_data'] = {"serialNumber": self.serialNumber, "webType": self.name_first,
                                             "crawlerType": self.name_second, "data": str({"msg": "未查到成绩"})}
                    yield result
        except Exception:
            self.logger.info(f"查找{user_name}的基本成绩时出错{response.url}：{traceback.format_exc()}")

    def parse_pic(self, response):
        """Persist the certificate image, upload it, and emit the final item."""
        meta = response.meta
        race_id = meta.get("race_id", "")
        user_id = meta.get("user_id", "")
        race_no = meta.get("race_no", "")
        score_data = meta.get("score_data", {})
        try:
            self.logger.info(f"开始下载{race_no}的证书：{response.url}")
            pic_name = f"{race_no}_{calc_str_md5(response.url)}.jpg"
            dir_path = os.path.join(file_path, "picture", self.name_first)
            os.makedirs(dir_path, exist_ok=True)
            save_path = os.path.join(dir_path, pic_name)
            # Bug fix: the original yielded nothing at all when the image was
            # already cached on disk, so the task produced no result item.
            # Now: write only when missing, but always upload and emit.
            if not os.path.exists(save_path):
                with open(save_path, "wb") as f:
                    f.write(response.body)
            upload_path = f"flow/{race_id}/{user_id}/pic/{pic_name}"
            upload_flag = upload_file(save_path, upload_path)
            if upload_flag:
                self.logger.info(f"{save_path}上传成功：{upload_path}")
            else:
                self.logger.info(f"{save_path}上传失败：{upload_path}")
            score_data["certImg"] = upload_path
            result = self.result_item_assembler(response)
            result['result_data'] = {"serialNumber": self.serialNumber, "webType": self.name_first,
                                     "crawlerType": self.name_second, "data": str(score_data)}
            yield result
        except Exception:
            self.logger.info(f"下载{race_no}证书时出错{response.url}：{traceback.format_exc()}")

    def err_parse(self, failure):
        """Errback for failed requests: log the URL and the failure reason.

        Bug fix: ``traceback.format_exc()`` only works inside an ``except``
        block; in a Twisted errback it logged "NoneType: None".  Log the
        ``failure`` object itself, which carries the real error.
        """
        self.logger.warning(f"请求失败：{failure.request.url},错误原因:{repr(failure)}")

    def get_proxy(self):
        """Fetch one fresh HTTP proxy from the dmdaili API.

        Returns ``"http://ip:port"``.  Raises on network errors, timeout, or
        an empty ``data`` list — callers invoke this inside a retry loop that
        treats any exception as a failed attempt.
        """
        url = "http://api.dmdaili.com/dmgetip.asp?apikey=062c68ee&pwd=2ceacf762585db3e1d24cbdb4ef11091&getnum=1&httptype=1&geshi=2&fenge=&fengefu=&operate=all"
        # timeout added so a stalled proxy API cannot hang the retry loop;
        # parse the JSON once instead of twice.
        content = requests.get(url, timeout=10).text
        entry = json.loads(content).get("data", [])[0]
        ip = entry.get("ip", "")
        port = entry.get("port", "")
        return f"http://{ip}:{port}"
