# -*- coding: utf-8 -*-
import datetime
import json
import re
from decimal import Decimal

import pandas as pd
import scrapy
from loguru import logger

from apps.listed_company.listed_company.items import NetMoneySupply
from components.config import NET_ROBOT_MYSQL_CONFIG
from utils.db.mysqldb import MysqlDB


class MoneySupplySpider(scrapy.Spider):
    """Crawl monthly money-supply statistics (M0 / M1 / M2) from the People's
    Bank of China (PBC) statistics site.

    Crawl chain:
        yearly index -> current-year page -> "货币统计概览" section
        -> monthly "货币供应量" tables -> NetMoneySupply items.

    Year-over-year growth is computed against the same month of the previous
    year, read back from the ``net_money_supply`` MySQL table.
    """

    listed_exchange = "中国人民银行"
    name = "money_supply"
    # Browser-like headers: the PBC site tends to reject obviously
    # non-browser requests.
    headers = {
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
        "Accept-Language": "zh,zh-TW;q=0.9,en-US;q=0.8,en;q=0.7,zh-CN;q=0.6",
        "Cache-Control": "max-age=0",
        "Proxy-Connection": "keep-alive",
        "Referer": "http://www.pbc.gov.cn/diaochatongjisi/116219/index.html",
        "Upgrade-Insecure-Requests": "1",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36",
    }
    # NOTE: class-level attribute — the connection is created once at import
    # time and shared by every instance of this spider.
    to_db = MysqlDB(
        ip=NET_ROBOT_MYSQL_CONFIG["MYSQL_IP"],
        port=NET_ROBOT_MYSQL_CONFIG["MYSQL_PORT"],
        db=NET_ROBOT_MYSQL_CONFIG["MYSQL_DB"],
        user_name=NET_ROBOT_MYSQL_CONFIG["MYSQL_USER_NAME"],
        user_pass=NET_ROBOT_MYSQL_CONFIG["MYSQL_USER_PASS"],
    )
    # Disable the duplicate filter so repeated crawls of the same URLs are
    # not silently dropped.
    custom_settings = {
        "DUPEFILTER_DEBUG": True,
        "DUPEFILTER_CLASS": "scrapy.dupefilters.BaseDupeFilter",
    }

    def start_requests(self):
        """Entry point: request the yearly index of statistics reports."""
        url = "http://www.pbc.gov.cn/diaochatongjisi/116219/116319/index.html"
        yield scrapy.Request(url, callback=self.parse_list, headers=self.headers)

    def parse_list(self, response, **kwargs):
        """Pick the current year's link from the yearly index page."""
        logger.debug("year index page head: {}", response.text[:100])

        year_list = response.xpath('//*[@id="r_con"]//div[@class="wengao2"]')
        year_data = {
            i.xpath(".//a/text()").get(): i.xpath(".//a/@href").get()
            for i in year_list
        }

        current_year = str(datetime.date.today().year)
        for year, href in year_data.items():
            if current_year in year:
                yield scrapy.Request(
                    f"http://www.pbc.gov.cn{href}",
                    callback=self.parse_list2,
                    headers=self.headers,
                )
                break
        else:
            logger.warning("no link for year {} on {}", current_year, response.url)

    def parse_list2(self, response, **kwargs):
        """Follow the '货币统计概览' (money and banking statistics) link."""
        logger.debug("year page head: {}", response.text[:100])
        matches = re.findall(
            r'<a href="(.*?)" class="lan14cu">货币统计概览', response.text
        )
        if not matches:
            # Layout change or blocked request — fail loudly instead of
            # raising IndexError.
            logger.warning("'货币统计概览' link not found on {}", response.url)
            return

        yield scrapy.Request(
            f"http://www.pbc.gov.cn{matches[0]}",
            callback=self.parse_list3,
            headers=self.headers,
        )

    def parse_list3(self, response, **kwargs):
        """Locate the '货币供应量' (money supply) rows and follow their htm links."""
        logger.debug("overview page head: {}", response.text[:100])

        trs = response.xpath('//*[@id="con"]//table[@class="a2015"]//tr')
        for tr in trs:
            if "货币供应量" in tr.xpath("string(.)").get():
                for href in tr.xpath(".//a/@href").getall():
                    if "htm" in href:
                        yield scrapy.Request(
                            f"http://www.pbc.gov.cn{href}",
                            callback=self.parse_list4,
                            headers=self.headers,
                        )

    @staticmethod
    def _yoy(current, last):
        """Year-over-year growth in percent, rounded to 0.1 percentage points."""
        return ((current - last) / last).quantize(Decimal("0.001")) * 100

    def parse_list4(self, response, **kwargs):
        """Parse the monthly data table and yield one item per published month.

        Months whose previous-year row is not yet in the database are skipped
        (with a warning) instead of crashing with IndexError.
        """
        logger.debug("data page head: {}", response.text[:100])
        df = pd.read_html(response.text)[0]
        # Rows 5/7/9/11 carry the M2/M1/M0 headline figures; columns 0-1 are
        # label columns — presumably stable page layout, verify on breakage.
        df = df.drop(
            [0, 1],
            axis=1,
        ).loc[[5, 7, 9, 11]]
        # Promote the first retained row to column names (the "yyyy.MM" labels).
        df.columns = df.iloc[0]
        # Use the first column as the row index, then drop the header row itself.
        df.set_index(df.columns[0], inplace=True)
        df = df.drop(
            ["项目 Item"],
            axis=0,
        )
        for time, data in df.to_dict().items():
            if pd.isna(data["货币和准货币（M2）"]):
                continue  # month not published yet
            time_str = time.replace(".", "")  # "2024.01" -> "202401"
            date_obj = datetime.datetime.strptime(time_str, "%Y%m")
            # Same month, previous year, formatted back to "yyyyMM".
            last_year_str = date_obj.replace(year=date_obj.year - 1).strftime("%Y%m")
            # last_year_str is strftime output (digits only), so this f-string
            # is not injectable; parameterize if MysqlDB ever supports it.
            sql = f"SELECT m2_supply_final_value, m1_supply_final_value, m0_supply_final_value FROM `net_robot`.`net_money_supply` WHERE `time` = '{last_year_str}'"
            rows = self.to_db.find(sql, to_json=True)
            if not rows:
                logger.warning(
                    "no stored row for {}; skipping YoY for {}", last_year_str, time_str
                )
                continue
            last_year_data = rows[0]
            m2 = Decimal(data["货币和准货币（M2）"])
            m1 = Decimal(data["货币（M1）"])
            m0 = Decimal(data["流通中货币（M0）"])
            yield NetMoneySupply(
                **{
                    "time": time_str,
                    "m2_supply_final_value": m2,
                    "m2_yoy_increase": self._yoy(
                        m2, last_year_data["m2_supply_final_value"]
                    ),
                    "m1_supply_final_value": m1,
                    "m1_yoy_increase": self._yoy(
                        m1, last_year_data["m1_supply_final_value"]
                    ),
                    "m0_supply_final_value": m0,
                    "m0_yoy_increase": self._yoy(
                        m0, last_year_data["m0_supply_final_value"]
                    ),
                }
            )


if __name__ == "__main__":
    # Allow launching this spider directly without invoking the scrapy CLI.
    from scrapy import cmdline

    cmdline.execute(["scrapy", "crawl", "money_supply"])
