# -*- coding:utf8 -*-
import json
import traceback
import os
import re
import typing
from urllib.parse import urlparse
from urllib.request import urlretrieve
from scrapy import Request
from squirrel_core.commons.utils.tools import calc_str_md5, upload_file
from squirrel_core.frame.spider_makaka import MakakaSpider

# Root directory for downloaded images; overridable via the FILE_PATH env var
# (defaults to "/" — presumably set in the deployment environment; verify).
file_path = os.environ.get("FILE_PATH", "/")


class zuicool_race(MakakaSpider):
    """Crawl marathon ("马拉松") and trail-run ("越野跑") events from zuicool.com.

    Pipeline: listing page -> event detail page -> optional news page.
    Each event is emitted as one result item whose ``result_data["data"]``
    payload is the stringified ``race_data`` dict, de-duplicated via its MD5
    (``_dup_str``).  ``name_first`` / ``name_second`` are assumed to be
    provided by :class:`MakakaSpider` — confirm against the framework.
    """

    name = "zuicool_race"
    # Listing URL -> race-type label attached to every item scraped from it.
    url_dict = {"https://zuicool.com/events?type=run&regtype=newreg": "马拉松",
                "https://zuicool.com/events?type=trail-run&regtype=newreg": "越野跑"}
    header = {
        "Host": "zuicool.com",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:138.0) Gecko/20100101 Firefox/138.0",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
        "Accept-Encoding": "gzip, deflate, br, zstd"
    }
    serialNumber = ""
    need_ssdbstore_dup = True

    # Compiled once: matches date tokens such as 2024-05-01 / 2024.5.1 / 2024/5/1.
    _DATE_RE = re.compile(r"\d+[-/.]\d+[-/.]\d+")

    def get_ext_requests_or_urls(self, data=None):
        """Seed the crawl.

        ``data`` may be a JSON string or a mapping carrying ``serialNumber``;
        when present, the serial number is stored and a "task started"
        procedure message is uploaded.  Yields one listing-page request per
        entry in ``url_dict``.
        """
        if data and isinstance(data, str):
            data = json.loads(data)
        if data and isinstance(data, typing.MutableMapping):
            self.serialNumber = data.get("serialNumber", "")
            self.upload_procedure({"serialNumber": self.serialNumber, "code": 100, "message": "任务启动成功"})
        for url, race_type in self.url_dict.items():
            yield Request(url=url, callback=self.parse, dont_filter=True, errback=self.err_parse,
                          headers=self.header, meta={"race_type": race_type})

    def parse(self, response, **kwargs):
        """Parse one listing page: follow every event link, then the next page."""
        race_type = response.meta.get("race_type", "")
        self.logger.info(f"开始解析{race_type}赛事列表信息:{response.url}")
        try:
            race_url_list = response.xpath("//div[@class='section events-list']/div[@class='event']//a[@class='event-a']/@href").extract()
            for race_url in race_url_list:
                yield Request(race_url, callback=self.parse_race, headers=self.header,
                              errback=self.err_parse, meta={"race_type": race_type})
            # The last page's "next" anchor carries a data-page attribute, so
            # excluding it terminates the pagination loop.
            next_page = response.xpath(".//li[contains(@class, 'next')]/a[not(@data-page)]/@href").extract_first()
            if next_page:
                yield Request(next_page, callback=self.parse, dont_filter=True,
                              errback=self.err_parse, meta={"race_type": race_type, "page_flag": True})
        except Exception:
            # error (not info): this is a failure path.
            self.logger.error(f"解析{race_type}赛事列表信息时出错{response.url}：{traceback.format_exc()}")

    def parse_race(self, response):
        """Parse one event detail page.

        Builds ``race_data`` (type, name, date, address, description, groups)
        and either follows the news link first or emits the item directly
        with an empty ``information`` list.
        """
        race_type = response.meta.get("race_type", "")
        self.logger.info(f"开始解析赛事信息:{response.url}")
        try:
            race_id = response.url.split("/")[-1]
            # default="" so missing nodes don't leave None in the item / crash re.
            race_name = response.xpath("//div[@class='name']/h1/a/text()").extract_first(default="")
            info = response.xpath("//div[@class='start_datetime-loc']/text()").extract_first(default="")
            # The node mixes date and venue, e.g. "2024-05-01 · 杭州".
            race_date = "".join(self._DATE_RE.findall(info))
            # Strip each date token individually: replacing the *joined* dates
            # string silently failed whenever more than one date was present.
            race_address = re.sub(r"[\s·.]+", "", self._DATE_RE.sub("", info))
            describe = "".join(response.xpath("//div[@class='section event-desc_lead']//text()").extract()).strip()
            group_list = []
            for pkg in response.xpath("//div[@class='pkgs-list']/div[@class='pkg2']"):
                texts = pkg.xpath("./div[not(contains(@class, 'col-xs-3'))]//text()").extract()
                # Keep only non-empty fragments after trimming whitespace.
                group_list.append([t for t in (s.strip() for s in texts) if t])
            race_data = {"race_type": race_type, "race_name": race_name, "race_date": race_date,
                         "race_address": race_address, "describe": describe, "group": group_list}
            information_url = response.xpath("//li[@class='wppost col-sm-6']/a/@href").extract_first()
            if information_url:
                # NOTE(review): deliberately no custom headers here — the
                # hard-coded "Host: zuicool.com" would break off-site links.
                yield Request(information_url, callback=self.parse_info, errback=self.err_parse,
                              meta={"race_data": race_data, "race_id": race_id})
            else:
                race_data["information"] = []
                yield self._build_result(response, race_data)
        except Exception:
            self.logger.error(f"解析赛事信息时出错{response.url}：{traceback.format_exc()}")

    def parse_info(self, response):
        """Parse the news page linked from an event: collect paragraph text and
        image placeholders into ``race_data["information"]``, then emit the item.
        """
        race_data = response.meta.get("race_data", {})
        race_name = race_data.get("race_name", "")
        self.logger.info(f"开始解析{race_name}的咨询信息")
        race_id = response.meta.get("race_id", "")
        try:
            content_list = response.xpath("//h4[@class='entry-title']/text()").extract()
            for p in response.xpath("//div[@class='entry-content']/p"):
                if p.xpath("./img"):
                    # Image paragraph: replace each image with its placeholder.
                    for pic_url in p.xpath("./img/@src").extract():
                        if pic_url:
                            content_list.append(self.download_pic(pic_url, race_id))
                else:
                    content_list.append("".join(p.xpath("string()").extract()).strip())
            race_data["information"] = [c for c in content_list if c]
            yield self._build_result(response, race_data)
        except Exception:
            self.logger.error(f"解析{race_name}的咨询信息时出错：{traceback.format_exc()}")

    def _build_result(self, response, race_data):
        """Assemble the standard result item for one ``race_data`` payload.

        Shared by ``parse_race`` and ``parse_info`` so the item shape and the
        dedup key stay in sync.
        """
        result = self.result_item_assembler(response)
        result['result_data'] = {"serialNumber": self.serialNumber, "webType": self.name_first,
                                 "crawlerType": self.name_second, "data": str(race_data)}
        result['_dup_str'] = calc_str_md5(str(race_data))
        return result

    def download_pic(self, pic_url, match_id):
        """Download one image, upload it, and return its placeholder string
        ("图片:/<site>/<name>"); returns "" on any failure.

        The local file name is <match_id>_<md5-of-url><suffix>, so re-runs
        skip images already on disk.
        """
        try:
            self.logger.info(f"开始下载图片：{pic_url}")
            suffix = os.path.splitext(urlparse(pic_url).path)[1].lower()
            pic_name = f"{match_id}_{calc_str_md5(pic_url)}{suffix}"
            dir_path = os.path.join(file_path, self.name_first)
            # exist_ok avoids the check-then-create race when several
            # downloads run concurrently.
            os.makedirs(dir_path, exist_ok=True)
            save_path = os.path.join(dir_path, pic_name)
            if not os.path.exists(save_path):
                urlretrieve(pic_url, save_path)
                if upload_file(save_path, f"{self.name_first}/{pic_name}"):
                    self.logger.info(f"{save_path}上传成功")
                else:
                    self.logger.info(f"{save_path}上传失败")
            return f"图片:/{self.name_first}/{pic_name}"
        except Exception:
            self.logger.error(f"下载{pic_url}失败：{traceback.format_exc()}")
            return ""

    def err_parse(self, failure):
        """Errback for failed requests.

        ``traceback.format_exc()`` is useless in a Twisted errback — there is
        no active exception, so it always produced "NoneType: None".  Log the
        Failure object itself instead.
        """
        self.logger.warning(f"请求失败：{failure.request.url},错误原因:{repr(failure)}")
