# -*- coding: utf-8 -*-
import datetime
import hashlib
import json
import re
import time

import requests
import scrapy
from dateutil.parser import parse
from dateutil.relativedelta import relativedelta

from apps.listed_company.listed_company.items import ListedCompanyItem, NetListedCompanyYearReportItem
from loguru import logger

from components.config import NET_ROBOT_MYSQL_CONFIG
from utils.db.mysqldb import MysqlDB
from utils.time_tools import get_random_date_list
from utils.tools import urlencode, urldecode
import oss2


class SseReportSpider(scrapy.Spider):
    """Crawl annual reports of companies listed on the Shanghai Stock Exchange.

    Pipeline:
      * ``start_requests`` reads SSE stock codes from MySQL and issues one
        bulletin-search query (JSONP API) per stock per date window.
      * ``parse_list`` filters the results down to full annual reports,
        de-duplicates against MySQL, and paginates.
      * ``parse_detail`` downloads each report PDF, uploads it to Aliyun OSS,
        and yields a ``NetListedCompanyYearReportItem``.
    """

    # Exchange label; the same literal is embedded in the SQL filter below.
    listed_exchange = '上海证券交易所'
    name = 'sse_report'
    headers = {
        "Accept": "*/*",
        "Accept-Language": "zh,zh-TW;q=0.9,en-US;q=0.8,en;q=0.7,zh-CN;q=0.6",
        "Connection": "keep-alive",
        "Referer": "https://www.sse.com.cn/",
        "Sec-Fetch-Dest": "script",
        "Sec-Fetch-Mode": "no-cors",
        "Sec-Fetch-Site": "same-site",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36",
        "sec-ch-ua": "\"Not)A;Brand\";v=\"99\", \"Google Chrome\";v=\"127\", \"Chromium\";v=\"127\"",
        "sec-ch-ua-mobile": "?0",
        "sec-ch-ua-platform": "\"Windows\""
    }
    # NOTE(review): session cookies captured from a browser; they will expire.
    cookies = {
        "gdp_user_id": "gioenc-11e1g4b0%2C6e9a%2C523e%2Cc27g%2C7788beb49954",
        "JSESSIONID": "CCB9F4BE607A2FE5C088E3304E22D92E",
        "ba17301551dcbaf9_gdp_session_id": "da2c2a8c-2305-4def-a851-a458332ff73a",
        "ba17301551dcbaf9_gdp_session_id_sent": "da2c2a8c-2305-4def-a851-a458332ff73a",
        "ba17301551dcbaf9_gdp_sequence_ids": "{%22globalKey%22:888%2C%22VISIT%22:14%2C%22PAGE%22:71%2C%22VIEW_CLICK%22:756%2C%22VIEW_CHANGE%22:23%2C%22CUSTOM%22:29}"
    }
    # SECURITY(review): OSS AccessKey id/secret are hard-coded in source
    # control — move them to configuration/environment (the MySQL settings
    # below already come from NET_ROBOT_MYSQL_CONFIG) and rotate this key.
    bucket = oss2.Bucket(
        oss2.Auth("LTAI5tF8aeb3S3ypTRK9kxgt", "1pQGu6dS1NuEFh0pO5O0WjVGHJOWF6"),
        "oss-cn-hangzhou.aliyuncs.com",
        "wfq-gov-file",
    )
    to_db = MysqlDB(
        ip=NET_ROBOT_MYSQL_CONFIG["MYSQL_IP"],
        port=NET_ROBOT_MYSQL_CONFIG['MYSQL_PORT'],
        db=NET_ROBOT_MYSQL_CONFIG['MYSQL_DB'],
        user_name=NET_ROBOT_MYSQL_CONFIG['MYSQL_USER_NAME'],
        user_pass=NET_ROBOT_MYSQL_CONFIG['MYSQL_USER_PASS'],
    )

    def start_requests(self):
        """Issue one bulletin-search request per SSE stock per date window.

        The search API rejects overly wide date ranges, so the last ten years
        are split into windows by ``get_random_date_list``; each window gets
        its own first-page request, and ``parse_list`` handles pagination.
        """
        url = "https://query.sse.com.cn/security/stock/queryCompanyBulletin.do"
        # Ten years back from today, chunked into ~1080-day windows.
        random_date_list = get_random_date_list(datetime.date.today() - relativedelta(days=365 * 10), datetime.date.today(), [1080, 1080])
        sql = "SELECT stock_code, stock_abb FROM `net_robot`.`net_listed_company_info` WHERE `listed_exchange` = '上海证券交易所'"
        stock_code_list = self.to_db.find(sql)
        for stock_code, stock_abb in stock_code_list:
            for date_list in random_date_list:
                beginDate = date_list[0]
                endDate = date_list[1]
                params = {
                    "jsonCallBack": "jsonpCallback61987308",
                    "isPagination": "true",
                    "pageHelp.pageSize": "25",
                    "pageHelp.pageNo": "1",
                    "pageHelp.beginPage": "1",
                    "pageHelp.cacheSize": "1",
                    "pageHelp.endPage": "1",
                    "productId": stock_code,
                    "securityType": "0101,120100,020100,020200,120200",
                    "reportType2": "DQBG",
                    "reportType": "ALL",
                    "beginDate": beginDate,
                    "endDate": endDate,
                    "_": str(int(time.time() * 1000)),  # cache-buster, ms timestamp
                }
                yield scrapy.Request(url + "?" + urlencode(params), callback=self.parse_list, cookies=self.cookies, headers=self.headers, meta={'params': params, 'stock_abb': stock_abb, 'stock_code': stock_code})

    def parse_list(self, response, **kwargs):
        """Parse one JSONP result page: yield PDF requests, then paginate.

        Only full annual reports are kept (title contains 年度报告/年报 but not
        半年/季度/摘要, i.e. excludes half-year, quarterly and summary docs).
        Reports already recorded in MySQL are skipped before downloading.
        """
        start_params = response.meta.get("params")
        stock_code = response.meta.get("stock_code")
        stock_abb = response.meta.get("stock_abb")
        # Strip the JSONP wrapper jsonpCallbackNNN(...) and parse the payload.
        lines = json.loads(re.findall(r"jsonpCallback\d+\((.*)\)", response.text)[0])
        if lines['pageHelp']['total']:
            for line in lines['result']:
                if ('年度报告' in line['TITLE'] or '年报' in line['TITLE']) and '半年' not in line['TITLE'] and '季度' not in line['TITLE'] and '摘要' not in line['TITLE']:
                    # Stable per-report id derived from the relative URL.
                    report_uuid = hashlib.md5(line['URL'].encode('UTF-8')).hexdigest()
                    report_url = f"https://www.sse.com.cn{line['URL']}"
                    save_path = f'https://wfq-gov-file.oss-cn-hangzhou.aliyuncs.com/listed_company_year_report/{stock_code}/{line["TITLE"]}_{report_uuid}.pdf'

                    item = NetListedCompanyYearReportItem(**{
                        'stock_code': stock_code,
                        'stock_abb': stock_abb,
                        'report_uuid': report_uuid,
                        'report_name': line['TITLE'],
                        'report_url': report_url,
                        'report_date': line['SSEDATE'],
                        'report_file_data': {'oss_url': save_path},
                    })
                    # SECURITY(review): f-string-built SQL. report_uuid is an
                    # md5 hexdigest and stock_code comes from our own DB, so
                    # injection risk is low here, but switch to parameterized
                    # queries if MysqlDB.find supports bind parameters.
                    sql = f"SELECT * FROM `net_robot`.`net_listed_company_year_report` WHERE `stock_code` = '{stock_code}' AND `report_uuid` = '{report_uuid}'"
                    existed = self.to_db.find(sql)
                    if existed:
                        logger.info(f"{stock_code} 已存在: {line['TITLE']} /{line['SSEDATE']}")
                        continue

                    yield response.follow(report_url, callback=self.parse_detail, cb_kwargs={'item': item})

            page_count = lines['pageHelp']['pageCount']
            # Only the first page of a window fans out; follow-up pages are
            # marked is_next=False so they never re-paginate.
            if response.meta.get("is_next") is not False:
                if page_count > 1:
                    for page_num in range(2, int(page_count) + 1):
                        url = "https://query.sse.com.cn/security/stock/queryCompanyBulletin.do"
                        # BUGFIX: copy the dict per page. Previously all
                        # paginated requests shared (and kept mutating) the
                        # same params dict via meta, so every request's meta
                        # ended up holding the last page's numbers.
                        params = dict(start_params)
                        logger.info(f"{self.listed_exchange} 分页: {page_num} /{page_count}")
                        params["pageHelp.beginPage"] = f"{page_num}"
                        params["pageHelp.pageNo"] = f"{page_num}"
                        params["pageHelp.endPage"] = f"{page_num}"
                        params["_"] = str(int(time.time() * 1000))
                        yield scrapy.Request(url + "?" + urlencode(params), callback=self.parse_list, cookies=self.cookies, headers=self.headers, meta={'params': params, 'stock_abb': stock_abb, 'stock_code': stock_code, 'is_next': False})

    def parse_detail(self, response, **kwargs):
        """Upload the downloaded PDF body to OSS, then yield the item.

        Raises on any non-200 OSS response so Scrapy records the failure
        instead of silently dropping the report.
        """
        item = kwargs.get('item')
        res = self.bucket.put_object(item['report_file_data']['oss_url'], response.body)
        if res.status == 200:
            print(item, len(response.body))
            yield item
        else:
            raise Exception(res)


if __name__ == "__main__":
    # Convenience entry point: run this spider directly for local debugging.
    from scrapy import cmdline

    cmdline.execute(["scrapy", "crawl", "sse_report"])
