# -*- coding:utf8 -*-
import json
import traceback
from lxml import etree
import os
import typing
from urllib.parse import urlparse
from urllib.request import urlretrieve
from scrapy import Request
from squirrel_core.commons.utils.tools import calc_str_md5, upload_file
from squirrel_core.frame.spider_makaka import MakakaSpider
file_path = os.environ.get("FILE_PATH", "/")


class malamala_race(MakakaSpider):
    """Spider for race (marathon) listings and details on mararun.com.

    Crawl flow:
      1. Hit the list endpoint with a 500-item probe to learn the real total.
      2. Re-request the list with ``limit=[0,total]``.
      3. For every match, fetch its detail JSON, then the HTML description
         page it points to, downloading all referenced pictures along the way.
      4. Yield one assembled result item per match.
    """

    name = "malamala_race"
    # Probe URL: the "limit=[0,500]" fragment is rewritten with the real
    # total after the first list response (see parse()).
    race_list_url = 'https://user-gw.mararun.com/v2/match/searchByConditions?q={"cateID":["all"],"city":["china"],"date":["all"]}&limit=[0,500]&cateType=1'
    header = {
        "Host": "user-gw.mararun.com",
        "Content-Type": "application/json"
    }
    detail_header = {
        "Host": "user-gw.mararun.com",
        "Accept": "application/json, text/plain, */*"
    }
    need_ssdbstore_dup = True
    # Task serial number, populated from the task payload in
    # get_ext_requests_or_urls() and echoed back in every result item.
    serialNumber = ""

    def get_ext_requests_or_urls(self, data=None):
        """Entry point: record the task serial number and start the list crawl.

        :param data: optional task payload — either a JSON string or a mapping
                     that may carry ``serialNumber``.
        :yields: the initial list ``Request`` (marked with ``total_flag`` so
                 parse() knows it is the total-count probe).
        """
        if data and isinstance(data, str):
            data = json.loads(data)
        if data and isinstance(data, typing.MutableMapping):
            self.serialNumber = data.get("serialNumber", "")
            self.upload_procedure({"serialNumber": self.serialNumber, "code": 100, "message": "任务启动成功"})
        yield Request(url=self.race_list_url, callback=self.parse, dont_filter=True, errback=self.err_parse,
                      headers=self.header, meta={"total_flag": True})

    def parse(self, response, **kwargs):
        """Parse the list endpoint.

        First pass (``total_flag`` set): read the total match count and
        re-issue the list request with that count as the page size.
        Second pass: schedule one detail request per match.
        """
        try:
            self.logger.info(f"开始解析列表信息：{response.url}")
            data = json.loads(response.text)
            if response.meta.get("total_flag", False):
                total = data.get("total", 0)
                # Skip the follow-up when there is nothing to page through
                # (the old code would request limit=[0,0]).
                if total:
                    # Replace the full "limit=[0,500]" fragment rather than a
                    # bare "500" so an unrelated "500" elsewhere in the URL
                    # cannot be clobbered.
                    race_list_url = response.url.replace("limit=[0,500]", f"limit=[0,{total}]")
                    yield Request(url=race_list_url, callback=self.parse, dont_filter=True,
                                  errback=self.err_parse, headers=self.header)
            else:
                # NOTE: "matchs" (sic) is the upstream API's field name.
                for match in data.get("matchs", []):
                    match_id = match.get("id", "")
                    detail_url = f"https://user-gw.mararun.com/v2/matchH5/detailSimple?matchID={match_id}"
                    yield Request(url=detail_url, callback=self.parse_detail, errback=self.err_parse,
                                  headers=self.detail_header)
        except Exception:
            self.logger.error(f"请求列表信息时出错：{traceback.format_exc()}")

    def parse_detail(self, response):
        """Parse a match detail JSON and follow its HTML description page."""
        try:
            self.logger.info(f"开始解析详情页：{response.url}")
            detail_data = json.loads(response.text)
            # "or {}" guards against matchDesc being present but JSON null.
            detail_content_url = (detail_data.get("matchDesc") or {}).get("detailOssUrl")
            if not detail_content_url:
                # Some matches carry no description page; a None URL would
                # make Request() raise and silently drop the match.
                self.logger.warning(f"详情页缺少detailOssUrl：{response.url}")
                return
            yield Request(url=detail_content_url, callback=self.parse_content, dont_filter=True, priority=100,
                          errback=self.err_parse, meta={"detail_data": detail_data})
        except Exception:
            self.logger.error(f"请求详情时出错：{traceback.format_exc()}")

    def parse_content(self, response):
        """Parse the HTML description page and yield the final result item.

        Downloads the list/detail cover pictures plus every inline ``<img>``,
        collects the text of ``<h3>``/``<p>`` nodes, and assembles the item
        (deduplicated via an md5 of the serialized payload).
        """
        try:
            self.logger.info(f"开始解析html页面：{response.url}")
            content_list = []
            detail_data = response.meta.get("detail_data", {})
            match_id = (detail_data.get("match") or {}).get("id", "")
            match_desc = detail_data.get("matchDesc") or {}
            list_pic_url = match_desc.get("listPicUrl", "")
            if list_pic_url:
                # Replace the remote URL with the local/uploaded placeholder.
                pic_txt = self.download_pic(list_pic_url, match_id)
                if pic_txt:
                    detail_data["matchDesc"]["listPicUrl"] = pic_txt
            detail_pic_url = match_desc.get("detailPicUrl", "")
            if detail_pic_url:
                pic_txt = self.download_pic(detail_pic_url, match_id)
                if pic_txt:
                    detail_data["matchDesc"]["detailPicUrl"] = pic_txt
            tree = etree.HTML(response.text)
            for node in tree.xpath("//h3|//p"):
                if node.xpath(".//img"):
                    # Picture paragraph: record each downloaded image's
                    # placeholder in reading order.
                    for pic_url in node.xpath(".//img/@src"):
                        if pic_url:
                            content_list.append(self.download_pic(pic_url, match_id))
                else:
                    # Text paragraph: flatten all descendant text.
                    content_list.append(node.xpath("string()"))
            r_data = {"api_data": detail_data, "html_data": content_list}
            result_dict = {"serialNumber": self.serialNumber, "webType": self.name_first,
                           "crawlerType": self.name_second, "data": str(r_data)}
            result = self.result_item_assembler(response)
            result['result_data'] = result_dict
            result['_dup_str'] = calc_str_md5(str(r_data))
            yield result
        except Exception:
            self.logger.error(f"解析html页面{response.url}时出错：{traceback.format_exc()}")

    def download_pic(self, pic_url, match_id):
        """Download a picture to local disk and upload it to remote storage.

        :param pic_url: absolute picture URL.
        :param match_id: match id used to namespace the file name.
        :return: placeholder string ``"图片:/<dir>/<name>"`` that replaces the
                 original URL in the result, or ``""`` on failure.
        """
        try:
            self.logger.info(f"开始下载图片：{pic_url}")
            # Keep the original extension; md5 of the URL dedupes downloads.
            suffix = os.path.splitext(urlparse(pic_url).path)[1].lower()
            pic_name = f"{match_id}_{calc_str_md5(pic_url)}{suffix}"
            dir_path = os.path.join(file_path, self.name_first)
            # exist_ok avoids the exists()+makedirs() TOCTOU race.
            os.makedirs(dir_path, exist_ok=True)
            save_path = os.path.join(dir_path, pic_name)
            if not os.path.exists(save_path):
                urlretrieve(pic_url, save_path)
                if upload_file(save_path, f"{self.name_first}/{pic_name}"):
                    self.logger.info(f"{save_path}上传成功")
                else:
                    self.logger.info(f"{save_path}上传失败")
            return f"图片:/{self.name_first}/{pic_name}"
        except Exception:
            self.logger.error(f"下载{pic_url}失败：{traceback.format_exc()}")
            return ""

    def err_parse(self, failure):
        """Errback for failed requests: log the URL and the failure reason.

        ``traceback.format_exc()`` is useless here — there is no active
        exception inside a Scrapy errback, so it always yielded
        "NoneType: None". Log the Twisted Failure's wrapped exception instead.
        """
        self.logger.warning(f"请求失败：{failure.request.url},错误原因:{failure.value!r}")
