# -*- coding:utf8 -*-
import json
import traceback
import os
import re
import copy
import typing
import requests
from datetime import datetime
import xml.etree.ElementTree as ET
from scrapy import Request
from squirrel_core.commons.utils.tools import calc_str_md5, upload_file
from squirrel_core.frame.spider_makaka import MakakaSpider

# Root directory for downloaded media; overridable via the FILE_PATH env var.
file_path = os.getenv("FILE_PATH", "/")


class runff_pic(MakakaSpider):
    """Spider for the www.runff.com race-photo service.

    Flow: "follow" a bib number (and/or a face photo) via the site's XML API,
    page through the matched photo/video list, download each media file to
    local disk, upload it, and report each result downstream via
    ``send_data``. When the stored login cookie has expired, a WeChat
    work-group webhook alert is sent and the spider shuts itself down.
    """

    name = "runff_pic"
    serialNumber = ""           # task serial number, echoed back in every report
    all_flag = False            # True => re-crawl everything, bypassing dedup filters
    need_ssdbstore_dup = True   # enable the ssdb-backed duplicate store
    header = {
        "Host": "www.runff.com",
        "X-Requested-With": "XMLHttpRequest",
        "User-Agent": "Mozilla/5.0 (Linux; Android 12; Redmi K30 Build/SKQ1.210908.001; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/138.0.7204.63 Mobile Safari/537.36 XWEB/1380045 MMWEBSDK/20250503 MMWEBID/346 MicroMessenger/8.0.61.2880(0x28003D3C) WeChat/arm64 Weixin NetType/4G Language/zh_CN ABI/arm64",
        "Accept": "text/plain, */*; q=0.01",
        "Content-Type": "application/xml",
        "Accept-Encoding": "gzip, deflate, br, zstd",
        "Accept-Language": "zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7",
        "Cookie": ""
    }
    home_url = "https://www.runff.com/d/aYnaYr"
    vid_url = "https://www.runff.com/html/apivideo/api.ashx?siteid=1282&isbxapimode=true"
    # {} is filled with the pay/order id returned by the "buy" call.
    video_url = "https://www.runff.com/html/apivideo/api.ashx?siteid=1282&method=userfiles&action=order&orderid={}"
    base_url = ""
    cookie_dict = {"bxmssmemberinfo": ""}
    # XML request templates for the BxAPI endpoint:
    # data0: follow a bib number; data1: list current follows;
    # data2: photo list page for one follow id; data3: follow by face photo;
    # data4: "buy" a free video to obtain a pay/order id; data5: cancel a follow.
    data0 = """<?xml version="1.0" encoding="utf-8"?><BxMessage><AppId>BxAPI</AppId><Type>1</Type><Action>follow</Action><Data><action>add</action><number>{r_no}</number><faceurl></faceurl><name></name><id>0</id></Data></BxMessage>"""
    data1 = """<?xml version="1.0" encoding="utf-8"?><BxMessage><AppId>BxAPI</AppId><Type>1</Type><Action>getNumberList</Action><Data><display>normal</display></Data></BxMessage>"""
    data2 = """<?xml version="1.0" encoding="utf-8"?><BxMessage><AppId>BxAPI</AppId><Type>1</Type><Action>getPhotoList</Action><Data><fid>{f_id}</fid><number>{r_no}</number><faceUrl></faceUrl><pageindex>{page_no}</pageindex><time></time><sign>false</sign><pagesize>200</pagesize><display>normal</display></Data></BxMessage>"""
    data3 = """<?xml version="1.0" encoding="utf-8"?><BxMessage><AppId>BxAPI</AppId><Type>1</Type><Action>follow</Action><Data><action>add</action><number></number><faceurl>{pic_base64}?amp;mid2=0</faceurl><name></name><id>0</id></Data></BxMessage>"""
    data4 = """<?xml version="1.0" encoding="utf-8"?><BxMessage><AppId>BxAPI</AppId><Type>1</Type><Action>buy</Action><Data><projectid>4877</projectid><videos>[{vid:%s,pid:%s,offset:0,duration:%s,type:"free"}]</videos><openid></openid><qrmode>false</qrmode><paytype>mini</paytype><action>prepay</action></Data></BxMessage>"""
    data5 = """<?xml version="1.0" encoding="utf-8"?><BxMessage><AppId>BxAPI</AppId><Type>1</Type><Action>follow</Action><Data><action>cancel</action><id>%s</id><number></number></Data></BxMessage>"""
    # Known race name -> runff live-site competition id.
    cmpt_id_dict = {
        "2025六盘水马拉松": "4853",
        "蒙泰·2025鄂尔多斯马拉松": "4865",
        "2025凯乐石东北100松花湖跑山赛": "4870",
        "2025哈尔滨马拉松": "4876",
        "2025崇礼云顶跑山赛": "4877",
        "2025云南凤庆滇红越野赛": "4897",
        "2025“赛动黔景”贵州·毕节赫章阿西里西登山、越野双日赛": "4899",
        "2025沈阳马拉松": "4900",
        "2025太原马拉松": "4909"
    }
    specific_settings = {
        'COOKIES_ENABLED': False,
        'HTTPERROR_ALLOWED_CODES': [302]
    }
    handle_httpstatus_list = [302]
    send_flag = False  # set once the login-expired alert has been sent; stops parsing

    def get_ext_requests_or_urls(self, data=None):
        """Entry point: parse the task payload and emit the initial requests.

        ``data`` is a JSON string or mapping carrying ``serialNumber`` and a
        ``spider_config`` with race/user identifiers, an optional face photo
        (base64) and an optional login cookie. Yields a face-follow request
        (when a photo is given) and a bib-number-follow request.
        """
        if data and isinstance(data, str):
            data = json.loads(data)
        if data and isinstance(data, typing.MutableMapping):
            self.serialNumber = data.get("serialNumber", "")
            spider_config = data.get("spider_config", {})
            self.all_flag = spider_config.get("all", False)
            race_id = spider_config.get("race_id", "")
            user_id = spider_config.get("user_id", "")
            race_no = spider_config.get("race_no", "")
            race_name = spider_config.get("race_name", "").replace(" ", "")
            pic_base64 = spider_config.get("pic_base64", "")
            home_url = spider_config.get("url", "")
            cookie = spider_config.get("cookie", "userinfo=0065006D004100450069006C0067006C0076006B0035004C0078004C0065006100790048005A006100480077004A007A004F004900310062007A0038003000700069004E002F00320072006500620034004500590065006B005900680035004D0071004D002F0066006900570037004600440048006E0057006800420050002B")
            self.header["Cookie"] = f"bxmssmemberinfo={cookie}"
            self.cookie_dict["bxmssmemberinfo"] = cookie
            # Normalize variants of this race name to the key used in cmpt_id_dict.
            if "2025崇礼云顶跑山赛" in race_name:
                race_name = "2025崇礼云顶跑山赛"
            # Prefer the competition id embedded in an explicit runff URL;
            # otherwise look it up by race name.
            if "www.runff.com" in home_url:
                cmpt = "".join(re.findall(r"/s(\d+).html", home_url)).strip()
            else:
                cmpt = self.cmpt_id_dict.get(race_name, "")
            self.base_url = f"https://www.runff.com/html/live/s{cmpt}.html?isbxapimode=true"

            if not race_no and not race_name and not pic_base64:
                self.logger.info("参赛号，头像和赛事名称必须存在")
                self.upload_procedure({"serialNumber": self.serialNumber, "code": 101, "message": "参赛号,头像和赛事名称必须存在，请仔细核对"})
                self.close_after_idle = True
                self.force_to_close_spider = True
            else:
                self.upload_procedure({"serialNumber": self.serialNumber, "code": 100, "message": "任务启动成功"})
                if pic_base64:
                    if not pic_base64.startswith("data:image"):
                        pic_base64 = f"data:image/jpeg;base64,{pic_base64}"
                    face_body = self.data3.format(pic_base64=pic_base64)
                    # FIX: "cmpt" was missing from this meta, so the face-search
                    # path formatted data4 with an empty competition id downstream.
                    yield Request(url=self.base_url, callback=self.parse_face, dont_filter=True, errback=self.err_parse,
                                  method='POST', body=face_body, headers=self.header, cookies=self.cookie_dict,
                                  meta={"race_id": race_id, "race_no": race_no, "user_id": user_id, "cmpt": cmpt})
                yield Request(url=self.base_url, callback=self.parse, dont_filter=True, errback=self.err_parse,
                              method='POST', body=self.data0.format(r_no=race_no), headers=self.header, cookies=self.cookie_dict,
                              meta={"race_id": race_id, "race_no": race_no, "user_id": user_id, "cmpt": cmpt})

    def _notify_login_expired(self):
        """Alert the WeChat work group that the runff login has expired and
        flag the spider for shutdown. Best-effort: webhook failures are only
        logged. Shared by ``parse`` and ``parse_face``."""
        current_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        payload = {
            "msgtype": "text",
            "text": {
                "content": f"【登录提醒：{current_time}】\n跑步维生素登录过期，需重新登录！",
                "mentioned_list": ["@all"]
            }
        }
        try:
            wx_url = "https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=51e9de35-67d4-4d07-841d-fdeec8fbea3c"
            requests.post(wx_url, headers={"Content-Type": "text/plain"}, data=json.dumps(payload), timeout=10)
        except Exception as e:
            self.logger.error(f"代理异常监控信息发送失败, 原因: {e}")
        self.close_after_idle = True
        self.force_to_close_spider = True
        self.send_flag = True

    def parse_face(self, response, **kwargs):
        """Handle the face-photo "follow" response: extract the follow id and
        request the first photo-list page for it."""
        race_id = response.meta.get("race_id", "")
        user_id = response.meta.get("user_id", "")
        race_no = response.meta.get("race_no", "")
        cmpt = response.meta.get("cmpt", "")
        if "请登录" in response.text:
            self._notify_login_expired()
        if not self.send_flag:
            try:
                self.logger.info(f"开始解析：{response.url}")
                root = ET.fromstring(response.text)
                list_element = root.find('.//Data/id')
                if list_element is not None and list_element.text:
                    face_id = json.loads(list_element.text)
                    xml_data = self.data2.format(f_id=face_id, r_no=race_no, page_no=1)
                    yield Request(url=self.base_url, callback=self.parse_list, dont_filter=True, errback=self.err_parse,
                                  method='POST', body=xml_data, headers=self.header, cookies=self.cookie_dict,
                                  meta={"race_id": race_id, "race_no": race_no, "user_id": user_id, "f_id": face_id,
                                        "download_delay": 2.0, "cmpt": cmpt})

            except Exception:
                self.logger.info(f"解析{response.url}时出错：{traceback.format_exc()}")

    def parse(self, response, **kwargs):
        """Handle the bib-number "follow" response.

        A "重复关注" (already followed) message means the follow id must be
        recovered from the follow list (``parse_reg``); otherwise the id is in
        this response and the first photo-list page is requested directly.
        """
        race_id = response.meta.get("race_id", "")
        user_id = response.meta.get("user_id", "")
        race_no = response.meta.get("race_no", "")
        cmpt = response.meta.get("cmpt", "")
        # NOTE(review): this check is broader than parse_face's "请登录" and may
        # match unrelated occurrences of "登录" — kept as-is to preserve behavior.
        if "登录" in response.text:
            self._notify_login_expired()
        if not self.send_flag:
            try:
                self.logger.info(f"开始解析：{response.url}")
                root = ET.fromstring(response.text)
                msg = root.find('.//Message').text
                if "重复关注" in msg:
                    yield Request(url=self.base_url, callback=self.parse_reg, dont_filter=True, errback=self.err_parse,
                                  method='POST', body=self.data1, headers=self.header, cookies=self.cookie_dict,
                                  meta={"race_id": race_id, "race_no": race_no, "user_id": user_id, "cmpt": cmpt})
                else:
                    f_id = root.find('.//Data/id').text
                    xml_data = self.data2.format(f_id=f_id, r_no=race_no, page_no=1)
                    yield Request(url=self.base_url, callback=self.parse_list, dont_filter=True, errback=self.err_parse,
                                  method='POST', body=xml_data, headers=self.header, cookies=self.cookie_dict,
                                  meta={"race_id": race_id, "race_no": race_no, "user_id": user_id, "f_id": f_id,
                                        "download_delay": 2.0, "cmpt": cmpt})

            except Exception:
                self.logger.info(f"解析{response.url}时出错：{traceback.format_exc()}")

    def parse_reg(self, response, **kwargs):
        """Recover the follow id for an already-followed bib number from the
        follow list, then request its first photo-list page."""
        race_id = response.meta.get("race_id", "")
        user_id = response.meta.get("user_id", "")
        race_no = response.meta.get("race_no", "")
        cmpt = response.meta.get("cmpt", "")
        try:
            self.logger.info(f"开始解析：{response.url}")
            root = ET.fromstring(response.text)
            list_element = root.find('.//Data/list')
            if list_element is not None and list_element.text:
                for entry in json.loads(list_element.text):
                    if entry.get("number", "") == race_no:
                        f_id = entry.get("id", "")
                        xml_data = self.data2.format(f_id=f_id, r_no=race_no, page_no=1)
                        yield Request(url=self.base_url, callback=self.parse_list, dont_filter=True, errback=self.err_parse,
                                      method='POST', body=xml_data, headers=self.header, cookies=self.cookie_dict,
                                      meta={"race_id": race_id, "race_no": race_no, "user_id": user_id, "f_id": f_id,
                                            "download_delay": 2.0, "cmpt": cmpt})

        except Exception:
            self.logger.info(f"解析{response.url}时出错：{traceback.format_exc()}")

    def parse_list(self, response):
        """Parse one photo-list page: download every photo, "buy" every free
        video, fan out the remaining pages (from page 1 only), then cancel the
        follow."""
        race_id = response.meta.get("race_id", "")
        user_id = response.meta.get("user_id", "")
        race_no = response.meta.get("race_no", "")
        f_id = response.meta.get("f_id", "")
        page_no = response.meta.get("page_no", 1)
        cmpt = response.meta.get("cmpt", "")
        try:
            if "查询中..." in response.text:
                # Server is still searching; retry the same request.
                yield response.request
            else:
                self.logger.info(f"开始解析：{response.url}")
                root = ET.fromstring(response.text)
                list_element = root.find('.//Data/list')
                video_element = root.find('.//Data/videos')
                data_list = []
                if list_element is not None and list_element.text:
                    data_list.extend(json.loads(list_element.text))
                if video_element is not None and video_element.text:
                    data_list.extend(json.loads(video_element.text))
                if data_list:
                    for data in data_list:
                        pic_link = data.get("big", "")
                        if pic_link:
                            # Links are either site-relative uploads or
                            # protocol-relative CDN URLs.
                            if pic_link.startswith("/upload"):
                                pic_url = f"http://p.chinarun.com{pic_link}"
                            else:
                                pic_url = f"http:{pic_link}"
                            yield Request(url=pic_url, callback=self.parse_pic, errback=self.err_parse, dont_filter=self.all_flag,
                                          meta={"race_id": race_id, "race_no": race_no, "user_id": user_id, "use_proxy": False})
                            v_id = data.get("vid", "")
                            if v_id:
                                duration = data.get("duration", "")
                                xml_data = self.data4 % (v_id, cmpt, duration)
                                yield Request(url=self.vid_url, callback=self.parse_vid, dont_filter=True,
                                              errback=self.err_parse,
                                              method='POST', body=xml_data, headers=self.header, cookies=self.cookie_dict,
                                              meta={"race_id": race_id, "race_no": race_no, "user_id": user_id,
                                                    "f_id": f_id, "download_delay": 2.0, "cmpt": cmpt})

                    # FIX: was ``page_no > 1`` — the initial request leaves
                    # page_no at its default of 1, so the pagination branch was
                    # unreachable and only page 1 was ever fetched. Fan out
                    # pages 2..N exactly once, from the first page.
                    if page_no == 1:
                        total = int(root.find('.//Data/total').text)
                        pages = total // 200 if total % 200 == 0 else total // 200 + 1
                        for page in range(2, pages + 1):
                            xml_data = self.data2.format(f_id=f_id, r_no=race_no, page_no=page)
                            yield Request(url=self.base_url, callback=self.parse_list, dont_filter=True, errback=self.err_parse,
                                          method='POST', body=xml_data, headers=self.header, cookies=self.cookie_dict,
                                          meta={"race_id": race_id, "race_no": race_no, "user_id": user_id, "f_id": f_id,
                                                "page_no": page, "cmpt": cmpt})
                    # Cancel the follow once the page has been dispatched.
                    yield Request(url=self.base_url, callback=self.parse_unbind, dont_filter=True, errback=self.err_parse,
                                  method='POST', body=self.data5 % f_id, headers=self.header, cookies=self.cookie_dict,
                                  meta={"race_no": race_no})

                else:
                    # No photos matched: still report back so the task completes.
                    result = self.send_data(resp=response, serial_number=self.serialNumber,
                                            user_id=user_id, race_id=race_id)
                    yield result

        except Exception:
            self.logger.info(f"解析{response.url}时出错：{traceback.format_exc()}")

    def parse_vid(self, response):
        """Parse the video "buy" response: dedupe on the pay id, then request
        the order's file list."""
        race_id = response.meta.get("race_id", "")
        user_id = response.meta.get("user_id", "")
        race_no = response.meta.get("race_no", "")
        f_id = response.meta.get("f_id", "")
        cmpt = response.meta.get("cmpt", "")
        try:
            if "查询中..." in response.text:
                yield response.request
            else:
                self.logger.info(f"开始解析：{response.url}")
                root = ET.fromstring(response.text)
                data_element = root.find('.//Data/data')
                if data_element is not None and data_element.text:
                    pay_id = json.loads(data_element.text).get("payid", "")
                    if pay_id:
                        # set_add returns falsy when the pay id was already seen;
                        # all_flag forces a re-crawl regardless.
                        status = self.server.set_add(f'{self.name}:storedupefilter', calc_str_md5(str(pay_id)))
                        if self.all_flag:
                            status = True
                        if not status:
                            self.logger.info("重复视频")
                        else:
                            video_url = self.video_url.format(pay_id)
                            yield Request(url=video_url, callback=self.parse_video_url, dont_filter=self.all_flag,
                                          errback=self.err_parse, headers=self.header, cookies=self.cookie_dict,
                                          meta={"race_id": race_id, "race_no": race_no, "user_id": user_id,
                                                "f_id": f_id, "download_delay": 2.0, "cmpt": cmpt})
        except Exception:
            self.logger.info(f"解析{response.url}时出错：{traceback.format_exc()}")

    def parse_video_url(self, response):
        """Parse the order's JSON file list and request each video file."""
        race_id = response.meta.get("race_id", "")
        user_id = response.meta.get("user_id", "")
        race_no = response.meta.get("race_no", "")
        try:
            file_list = json.loads(response.text).get("data", {}).get("files", [])
            for data in file_list:
                video_id = data.get("id", "")
                url = f'http:{data.get("url", "")}'
                yield Request(url=url, callback=self.parse_video, errback=self.err_parse, dont_filter=self.all_flag,
                              meta={"race_id": race_id, "race_no": race_no, "user_id": user_id, "video_id": video_id,
                                    "download_delay": 2.0, "max_retry_times": 2, "keep_alive": True, "use_proxy": False})
        except Exception:
            self.logger.info(f"解析{response.url}时出错：{traceback.format_exc()}")

    def _save_and_upload(self, response, file_name, file_type, race_id, user_id):
        """Write the response body to disk, upload it, and build the result
        item for ``send_data``. Returns None when the file already exists
        locally (already processed). Shared by ``parse_pic``/``parse_video``."""
        dir_path = os.path.join(file_path, "picture", self.name_first)
        if not os.path.exists(dir_path):
            os.makedirs(dir_path)
        save_path = os.path.join(dir_path, file_name)
        if os.path.exists(save_path):
            return None
        with open(save_path, "wb") as f:
            f.write(response.body)
        upload_path = f"flow/{race_id}/{user_id}/pic/{file_name}"
        upload_flag = upload_file(save_path, upload_path)
        if upload_flag:
            self.logger.info(f"{save_path}上传成功：{upload_path}")
        else:
            self.logger.info(f"{save_path}上传失败：{upload_path}")
        result_dict = {"pic_name": file_name, "pic_type": file_type, "url_address": upload_path, "race_id": race_id}
        return self.send_data(resp=response, serial_number=self.serialNumber, result_data=result_dict,
                              user_id=user_id, race_id=race_id, dup_str=str(result_dict))

    def parse_pic(self, response):
        """Save an individual photo and report it downstream."""
        race_id = response.meta.get("race_id", "")
        user_id = response.meta.get("user_id", "")
        race_no = response.meta.get("race_no", "")
        try:
            self.logger.info(f"开始下载照片：{response.url}")
            pic_name = f"{race_no}_{calc_str_md5(response.url)}.jpg"
            result = self._save_and_upload(response, pic_name, "jpg", race_id, user_id)
            if result is not None:
                yield result
        except Exception:
            self.logger.info(f"下载照片时出错{response.url}：{traceback.format_exc()}")

    def parse_video(self, response):
        """Save an individual video (keyed by its id) and report it downstream."""
        race_id = response.meta.get("race_id", "")
        user_id = response.meta.get("user_id", "")
        race_no = response.meta.get("race_no", "")
        video_id = response.meta.get("video_id", "")
        try:
            self.logger.info(f"开始下载视频：{response.url}")
            vid_name = f"{race_no}_{calc_str_md5(str(video_id))}.mp4"
            result = self._save_and_upload(response, vid_name, "mp4", race_id, user_id)
            if result is not None:
                yield result
        except Exception:
            self.logger.info(f"下载视频时出错{response.url}：{traceback.format_exc()}")

    def parse_unbind(self, response):
        """Log the outcome of cancelling the follow; nothing else to do."""
        race_no = response.meta.get("race_no", "")
        try:
            root = ET.fromstring(response.text)
            msg = root.find('.//Message').text
            self.logger.info(f"取消关注{race_no}：{msg}")
        except Exception:
            self.logger.info(f"取消关注{race_no}时出错：{traceback.format_exc()}")

    def send_data(self, resp=None, serial_number=None, result_data=None, user_id=None, race_id=None, dup_str=None):
        """Assemble the standard result item for one crawled media file (or a
        "no photos" notice when ``result_data`` is None). ``dup_str`` feeds the
        duplicate filter unless ``all_flag`` disables deduping."""
        if result_data is None:
            result_data = {"msg": "未查到照片信息"}
        result_dict = {"serialNumber": serial_number, "webType": self.name_first,
                       "userId": user_id, "raceId": race_id, "code": 200,
                       "crawlerType": self.name_second, "data": str(result_data)}
        result = self.result_item_assembler(resp)
        result['result_data'] = result_dict
        if not self.all_flag and dup_str is not None:
            result['_dup_str'] = calc_str_md5(dup_str)
        return result

    def err_parse(self, failure):
        """Request errback: log the failed URL and the actual failure.

        FIX: ``traceback.format_exc()`` is only meaningful inside an ``except``
        block — in a Twisted errback it always logged "NoneType: None".
        """
        self.logger.warning(f"请求失败：{failure.request.url},错误原因:{repr(failure.value)}")
