# -*- coding: utf-8 -*-
import datetime
import hashlib
import json
import re
import time

import requests
import scrapy
from dateutil.parser import parse
from dateutil.relativedelta import relativedelta

from apps.listed_company.listed_company.items import ListedCompanyItem, NetListedCompanyYearReportItem
from loguru import logger

from components.config import NET_ROBOT_MYSQL_CONFIG
from utils.db.mysqldb import MysqlDB
from utils.time_tools import get_random_date_list
from utils.tools import urlencode, urldecode
import oss2


class BseReportSpider(scrapy.Spider):
    """Crawl annual-report announcements for companies listed on the
    Beijing Stock Exchange (BSE).

    Flow:
      1. ``start_requests`` — one announcement-list POST per listed company
         (codes read from ``net_listed_company_info``).
      2. ``parse_list`` — parse the JSONP list response, skip summaries and
         already-stored reports, follow pagination, request each report PDF.
      3. ``parse_detail`` — upload the PDF body to OSS and yield the item.
    """

    listed_exchange = '北京证券交易所'
    name = 'bse_report'
    # Browser-like headers copied from a real session; the BSE endpoint is
    # picky about XHR-style requests.
    headers = {
        "Accept": "text/javascript, application/javascript, application/ecmascript, application/x-ecmascript, */*; q=0.01",
        "Accept-Language": "zh,zh-TW;q=0.9,en-US;q=0.8,en;q=0.7,zh-CN;q=0.6",
        "Connection": "keep-alive",
        "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
        "Origin": "https://www.bse.cn",
        "Referer": "https://www.bse.cn/disclosure/announcement.html",
        "Sec-Fetch-Dest": "empty",
        "Sec-Fetch-Mode": "cors",
        "Sec-Fetch-Site": "same-origin",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36",
        "X-Requested-With": "XMLHttpRequest",
        "sec-ch-ua": "\"Not)A;Brand\";v=\"99\", \"Google Chrome\";v=\"127\", \"Chromium\";v=\"127\"",
        "sec-ch-ua-mobile": "?0",
        "sec-ch-ua-platform": "\"Windows\""
    }
    # NOTE(review): captured browser-session cookies — they will expire;
    # confirm whether the endpoint actually requires them.
    cookies = {
        "HMACCOUNT": "A8AA8D522A6ABBC0",
        "Hm_lvt_ef6193a308904a92936b38108b93bd7f": "1723107980",
        "Hm_lpvt_ef6193a308904a92936b38108b93bd7f": "1723688216",
        "C3VK": "a12b02"
    }
    # SECURITY: OSS access key/secret are hard-coded in source. Move them to
    # configuration / environment variables and rotate the leaked pair.
    bucket = oss2.Bucket(
        oss2.Auth("LTAI5tF8aeb3S3ypTRK9kxgt", "1pQGu6dS1NuEFh0pO5O0WjVGHJOWF6"),
        "oss-cn-hangzhou.aliyuncs.com",
        "wfq-gov-file",
    )
    # Class-level handle: the DB connection is created at import time.
    to_db = MysqlDB(
        ip=NET_ROBOT_MYSQL_CONFIG["MYSQL_IP"],
        port=NET_ROBOT_MYSQL_CONFIG['MYSQL_PORT'],
        db=NET_ROBOT_MYSQL_CONFIG['MYSQL_DB'],
        user_name=NET_ROBOT_MYSQL_CONFIG['MYSQL_USER_NAME'],
        user_pass=NET_ROBOT_MYSQL_CONFIG['MYSQL_USER_PASS'],
    )

    def start_requests(self):
        """Yield one announcement-list request per BSE-listed company."""
        url = "https://www.bse.cn/disclosureInfoController/companyAnnouncement.do"
        params = {
            "callback": "jQuery331_1723688214979"
        }
        # End of the crawl window tracks "today" so reports published after
        # the previously hard-coded 2024-08-15 cutoff are still fetched.
        end_time = datetime.date.today().strftime('%Y-%m-%d')
        sql = "SELECT stock_code, stock_abb FROM `net_robot`.`net_listed_company_info` WHERE `listed_exchange` = '北京证券交易所'"
        stock_code_list = self.to_db.find(sql)
        for stock_code, stock_abb in stock_code_list:
            data = {
                'disclosureSubtype[]': [
                    '9503-1001',
                    '9503-1005',
                ],
                'page': '',
                'companyCd': stock_code,
                'isNewThree': '1',
                'startTime': '2014-08-01',
                'endTime': end_time,
                'keyword': '',
                'xxfcbj[]': '2',
                # Accidental duplicate 'destFilePath' entry removed.
                'needFields[]': [
                    'companyCd',
                    'companyName',
                    'disclosureTitle',
                    'disclosurePostTitle',
                    'destFilePath',
                    'publishDate',
                    'xxfcbj',
                    'fileExt',
                    'xxzrlx',
                ],
                'sortfield': 'xxssdq',
                'sorttype': 'asc',
            }
            # meta key must be 'start_data': parse_list reads that key; the
            # previous 'data' key left start_data as None and made pagination
            # crash with a TypeError.
            yield scrapy.FormRequest(
                url + "?" + urlencode(params),
                method='POST',
                formdata=data,
                callback=self.parse_list,
                cookies=self.cookies,
                headers=self.headers,
                meta={'start_data': data, 'stock_abb': stock_abb, 'stock_code': stock_code},
            )

    def parse_list(self, response, **kwargs):
        """Parse one JSONP page of announcements; emit PDF requests and
        follow pagination from the first page only."""
        start_data = response.meta.get("start_data")
        stock_code = response.meta.get("stock_code")
        stock_abb = response.meta.get("stock_abb")
        # Response is JSONP: jQuery331_...( [ {...} ] ) — strip the callback
        # wrapper, then take the single payload object.
        lines = json.loads(re.findall(r"jQuery331_\d+\((.*)\)", response.text)[0])[0]

        for line in lines['listInfo']['content']:
            # Skip report summaries ("摘要"); only full reports are wanted.
            if '摘要' not in line['disclosureTitle']:
                report_uuid = hashlib.md5(line['destFilePath'].encode('UTF-8')).hexdigest()
                report_url = f"https://www.bse.cn{line['destFilePath']}"
                save_path = f'https://wfq-gov-file.oss-cn-hangzhou.aliyuncs.com/listed_company_year_report/{stock_code}/{line["disclosureTitle"]}_{report_uuid}.pdf'

                item = NetListedCompanyYearReportItem(**{
                    'stock_code': stock_code,
                    'stock_abb': stock_abb,
                    'report_uuid': report_uuid,
                    'report_name': line['disclosureTitle'],
                    'report_url': report_url,
                    'report_date': line['publishDate'],
                    'report_file_data': {'oss_url': save_path},
                })
                # NOTE(review): f-string SQL — stock_code comes from our own
                # DB and report_uuid is an md5 hexdigest, so injection risk is
                # low, but prefer parameterized queries if MysqlDB supports them.
                sql = f"SELECT * FROM `net_robot`.`net_listed_company_year_report` WHERE `stock_code` = '{stock_code}' AND `report_uuid` = '{report_uuid}'"
                existed = self.to_db.find(sql)
                if existed:
                    logger.info(f"{stock_code} 已存在: {line['disclosureTitle']} /{line['publishDate']}")
                    continue
                yield scrapy.Request(report_url, callback=self.parse_detail, cookies=self.cookies, headers=self.headers, cb_kwargs={'item': item})

        page_count = lines['listInfo']['totalPages']
        # Only the first-page response schedules the remaining pages
        # (is_next=False on follow-ups prevents re-scheduling loops).
        if response.meta.get("is_next") is not False:
            if page_count > 1:
                for page_num in range(2, int(page_count) + 1):
                    url = "https://www.bse.cn/disclosureInfoController/companyAnnouncement.do"
                    params = {
                        "callback": "jQuery331_1723688214979"
                    }
                    # Copy per page: mutating the shared start_data dict would
                    # leak the last page number into every queued request's meta.
                    data = dict(start_data)
                    logger.info(f"{self.listed_exchange} 分页: {page_num} /{page_count}")
                    data["page"] = f"{page_num}"
                    yield scrapy.FormRequest(
                        url + "?" + urlencode(params),
                        method='POST',
                        formdata=data,
                        callback=self.parse_list,
                        cookies=self.cookies,
                        headers=self.headers,
                        meta={'start_data': data, 'stock_abb': stock_abb, 'stock_code': stock_code, 'is_next': False},
                    )

    def parse_detail(self, response, **kwargs):
        """Upload the downloaded PDF to OSS; yield the item only on success."""
        item = kwargs.get('item')
        res = self.bucket.put_object(item['report_file_data']['oss_url'], response.body)
        if res.status == 200:
            print(item, len(response.body))
            yield item
        else:
            # Let Scrapy's retry/error handling see the failed upload.
            raise Exception(res)


if __name__ == "__main__":
    # Convenience entry point: run this spider directly via the Scrapy CLI.
    from scrapy import cmdline

    cmdline.execute(["scrapy", "crawl", "bse_report"])
