# -*- coding: utf-8 -*-
import datetime
import hashlib
import json
import math
import random
import re
import time

import requests
import scrapy
from dateutil.parser import parse
from dateutil.relativedelta import relativedelta

from apps.listed_company.listed_company.items import (
    ListedCompanyItem,
    NetListedCompanyYearReportItem,
)
from loguru import logger

from components.config import NET_ROBOT_MYSQL_CONFIG
from utils.db.mysqldb import MysqlDB
from utils.time_tools import get_random_date_list
from utils.tools import urlencode, urldecode
import oss2


class SzseReportSpider(scrapy.Spider):
    """Crawl ~10 years of annual-report announcements for companies listed on
    the Shenzhen Stock Exchange (SZSE), upload each report PDF to Aliyun OSS,
    and yield one ``NetListedCompanyYearReportItem`` per new report.

    Flow: ``start_requests`` (one page-1 list request per stock) ->
    ``parse_list`` (filter annual reports, dedup against MySQL, fan out
    pagination) -> ``parse_detail`` (upload PDF to OSS, emit item).
    """

    listed_exchange = "深圳证券交易所"
    name = "szse_report"
    # Browser-like headers used for the SZSE JSON API (XHR markers included).
    headers = {
        "Accept": "application/json, text/javascript, */*; q=0.01",
        "Accept-Language": "zh,zh-TW;q=0.9,en-US;q=0.8,en;q=0.7,zh-CN;q=0.6",
        "Connection": "keep-alive",
        "Content-Type": "application/json",
        "Referer": "https://www.szse.cn/market/product/stock/list/index.html",
        "Sec-Fetch-Dest": "empty",
        "Sec-Fetch-Mode": "cors",
        "Sec-Fetch-Site": "same-origin",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36",
        "X-Request-Type": "ajax",
        "X-Requested-With": "XMLHttpRequest",
        "sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126", "Google Chrome";v="126"',
        "sec-ch-ua-mobile": "?0",
        "sec-ch-ua-platform": '"Windows"',
    }
    # SECURITY(review): OSS access key pair is hard-coded and has therefore
    # leaked into source control — rotate this key and load credentials from
    # environment/config instead of committing them.
    bucket = oss2.Bucket(
        oss2.Auth("LTAI5tF8aeb3S3ypTRK9kxgt", "1pQGu6dS1NuEFh0pO5O0WjVGHJOWF6"),
        "oss-cn-hangzhou.aliyuncs.com",
        "wfq-gov-file",
    )
    # MySQL connection used both for the stock-code source query and the
    # per-report dedup lookup.
    to_db = MysqlDB(
        ip=NET_ROBOT_MYSQL_CONFIG["MYSQL_IP"],
        port=NET_ROBOT_MYSQL_CONFIG["MYSQL_PORT"],
        db=NET_ROBOT_MYSQL_CONFIG["MYSQL_DB"],
        user_name=NET_ROBOT_MYSQL_CONFIG["MYSQL_USER_NAME"],
        user_pass=NET_ROBOT_MYSQL_CONFIG["MYSQL_USER_PASS"],
    )

    def start_requests(self):
        """Read SZSE stock codes from MySQL and issue the page-1
        announcement-list request for each, covering roughly the last
        ten years."""
        begin_date = datetime.date.today() - relativedelta(days=365 * 10)
        end_date = datetime.date.today()
        sql = "SELECT stock_code, stock_abb FROM `net_robot`.`net_listed_company_info` WHERE `listed_exchange` = '深圳证券交易所' LIMIT  2648,1000"
        stock_code_list = self.to_db.find(sql)
        for stock_code, stock_abb in stock_code_list:
            # Codes starting with "2" are skipped (presumably B-shares —
            # TODO confirm the intent with the data owner).
            if stock_code.startswith("2"):
                continue
            url = "https://www.szse.cn/api/disc/announcement/annList"
            params = {"random": str(random.random())}  # cache-busting query arg
            data = {
                "seDate": [str(begin_date), str(end_date)],
                "stock": [stock_code],
                "channelCode": ["fixed_disc"],
                "pageSize": 50,
                "pageNum": 1,
            }
            yield scrapy.Request(
                url + "?" + urlencode(params),
                method="POST",
                body=json.dumps(data),
                callback=self.parse_list,
                headers=self.headers,
                meta={"data": data, "stock_abb": stock_abb, "stock_code": stock_code},
            )

    def parse_list(self, response, **kwargs):
        """Parse one announcement-list page: schedule a PDF download for
        each annual report not yet in MySQL, and — on the page-1 response
        only — fan out requests for the remaining pages."""
        start_data = response.meta.get("data")
        stock_code = response.meta.get("stock_code")
        stock_abb = response.meta.get("stock_abb")
        logger.debug(response.text)
        lines = json.loads(response.text)
        # "data" can be null when a stock has no announcements in range.
        for line in lines.get("data") or []:
            title = line["title"]
            # Keep full annual reports only: drop half-year/quarterly
            # reports, abstracts (摘要) and cancelled (已取消) filings.
            if not (
                ("年度报告" in title or "年报" in title)
                and "半年" not in title
                and "季度" not in title
                and "摘要" not in title
                and "已取消" not in title
            ):
                continue
            # Stable id derived from the attachment path; also used in the
            # OSS object name so re-runs overwrite rather than duplicate.
            report_uuid = hashlib.md5(line["attachPath"].encode("UTF-8")).hexdigest()
            download_url = f"https://disc.static.szse.cn/download/{line['attachPath']}"
            report_url = f"https://www.szse.cn/disclosure/listed/bulletinDetail/index.html?{line['id']}"
            save_path = f'https://wfq-gov-file.oss-cn-hangzhou.aliyuncs.com/listed_company_year_report/{stock_code}/{line["title"]}_{report_uuid}.pdf'
            # NOTE(review): f-string SQL; both values come from our own DB /
            # an md5 hexdigest, but a parameterized query would be safer.
            sql = f"SELECT * FROM `net_robot`.`net_listed_company_year_report` WHERE `stock_code` = '{stock_code}' AND `report_uuid` = '{report_uuid}'"
            if self.to_db.find(sql):
                logger.info(
                    f"{stock_code} 已存在: {line['title']} /{line['publishTime']}"
                )
                continue
            item = NetListedCompanyYearReportItem(
                **{
                    "stock_code": stock_code,
                    "stock_abb": stock_abb,
                    "report_uuid": report_uuid,
                    "report_name": line["title"],
                    "report_url": report_url,
                    "report_date": line["publishTime"].split()[0],
                    "report_file_data": {"oss_url": save_path},
                }
            )
            yield response.follow(
                download_url, callback=self.parse_detail, cb_kwargs={"item": item}
            )

        # Fan out pagination from the first page only; follow-up pages carry
        # is_next=False so they never recurse.
        page_count = math.ceil(lines.get("announceCount", 0) / start_data["pageSize"])
        if response.meta.get("is_next") is False or page_count <= 1:
            return
        for page_num in range(2, int(page_count) + 1):
            url = "https://www.szse.cn/api/disc/announcement/annList"
            params = {"random": str(random.random())}
            # Copy rather than mutate the shared meta dict so every scheduled
            # request keeps its own pageNum.
            data = dict(start_data, pageNum=page_num)
            logger.info(
                f"{self.listed_exchange} {stock_abb} 分页: {page_num} /{page_count}"
            )
            yield scrapy.Request(
                url + "?" + urlencode(params),
                method="POST",
                body=json.dumps(data),
                callback=self.parse_list,
                headers=self.headers,
                meta={
                    "data": data,
                    "stock_abb": stock_abb,
                    "stock_code": stock_code,
                    "is_next": False,
                },
            )

    def parse_detail(self, response, **kwargs):
        """Upload the downloaded PDF body to OSS and emit the item only
        after a successful (HTTP 200) upload."""
        item = kwargs.get("item")
        res = self.bucket.put_object(item["report_file_data"]["oss_url"], response.body)
        if res.status == 200:
            logger.info(f"{item['report_uuid']} uploaded ({len(response.body)} bytes)")
            yield item
        else:
            # Surface failed uploads so Scrapy records/retries the error
            # instead of silently dropping the report.
            raise Exception(res)


if __name__ == "__main__":
    # Convenience entry point: run this spider directly from the file.
    from scrapy import cmdline

    cmdline.execute(["scrapy", "crawl", "szse_report"])
