# -*- coding: utf-8 -*-
import datetime
import json
import random
import re
import time
from decimal import Decimal

import scrapy
from dateutil.parser import parse

from apps.listed_company.listed_company.items import ListedCompanyItem
from loguru import logger
from utils.tools import urlencode, urldecode


class SzseSpider(scrapy.Spider):
    """Spider for the Shenzhen Stock Exchange (SZSE) listed-company roster.

    Crawl flow:
      1. ``start_requests``  -- page 1 of the stock list (report CATALOGID 1110).
      2. ``parse_list``      -- one ``companyGeneralization`` request per company,
                                plus requests for the remaining list pages.
      3. ``parse_detail1``   -- build the A-share / B-share items, then request
                                the management roster (report CATALOGID 1901).
      4. ``parse_detail``    -- fill in the board secretary and yield the items.
    """

    listed_exchange = '深圳证券交易所'
    name = 'szse'
    # Browser-like XHR headers; sent with every report-API request.
    headers = {
        "Accept": "application/json, text/javascript, */*; q=0.01",
        "Accept-Language": "zh,zh-TW;q=0.9,en-US;q=0.8,en;q=0.7,zh-CN;q=0.6",
        "Connection": "keep-alive",
        "Content-Type": "application/json",
        "Referer": "https://www.szse.cn/market/product/stock/list/index.html",
        "Sec-Fetch-Dest": "empty",
        "Sec-Fetch-Mode": "cors",
        "Sec-Fetch-Site": "same-origin",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36",
        "X-Request-Type": "ajax",
        "X-Requested-With": "XMLHttpRequest",
        "sec-ch-ua": "\"Not/A)Brand\";v=\"8\", \"Chromium\";v=\"126\", \"Google Chrome\";v=\"126\"",
        "sec-ch-ua-mobile": "?0",
        "sec-ch-ua-platform": "\"Windows\""
    }

    def to_decimal(self, text: str):
        """Normalise a numeric string to a Decimal with 4 decimal places.

        Strips ASCII and full-width thousands separators.  Returns ``None``
        for ``None`` or blank input.
        """
        if text is None:
            return None
        text = text.replace(',', '').replace('，', '').strip()
        if not text:
            # Fix: ``Decimal('')`` raises InvalidOperation -- treat blank as missing.
            return None
        # Fix: the old ``Decimal("%.4f" % Decimal(text))`` round-tripped the
        # value through a C double (``%f`` calls ``float()``), losing precision
        # for values with many significant digits; quantize stays in Decimal.
        return Decimal(text).quantize(Decimal('0.0001'))

    def start_requests(self):
        """Request page 1 of the stock list report."""
        url = "https://www.szse.cn/api/report/ShowReport/data"
        params = {
            "SHOWTYPE": "JSON",
            "CATALOGID": "1110",
            "TABKEY": "tab1",
            "PAGENO": "1",
            "random": str(random.random())  # cache-buster query param
        }
        yield scrapy.Request(url + "?" + urlencode(params), callback=self.parse_list,
                             headers=self.headers, meta={'params': params})

    def parse_list(self, response, **kwargs):
        """Fan out one detail request per company; schedule the other pages.

        Only the initial (page 1) response schedules pagination; follow-up
        pages carry ``is_next=False`` so they never re-paginate.
        """
        lines = response.json()[0]
        start_params = response.meta.get("params")
        for line in lines['data']:
            a_stock_code = line['agdm']
            url = "https://www.szse.cn/api/report/index/companyGeneralization"
            params = {
                "random": str(random.random()),
                "secCode": a_stock_code
            }
            yield response.follow(url + "?" + urlencode(params), callback=self.parse_detail1,
                                  meta={'a_stock_code': a_stock_code, 'stock_sector': line['bk']})

        page_count = int(lines['metadata']['pagecount'])
        if response.meta.get("is_next") is not False and page_count > 1:
            url = "https://www.szse.cn/api/report/ShowReport/data"
            # Fix: start at page 2 -- page 1 was already fetched by
            # start_requests; the old ``range(1, ...)`` re-requested it (the
            # random query params defeat Scrapy's dupefilter) and produced
            # duplicate items for every company on the first page.
            for page_num in range(2, page_count + 1):
                # Fix: copy -- the old code mutated the single shared
                # ``start_params`` dict that every request's meta pointed at.
                params = dict(start_params)
                logger.info(f"{self.listed_exchange} 分页: {page_num} /{page_count}")
                params["PAGENO"] = f"{page_num}"
                params["_"] = str(random.random())
                yield scrapy.Request(url + "?" + urlencode(params), callback=self.parse_list,
                                     headers=self.headers, meta={'params': params, 'is_next': False})

    def _build_item(self, result, stock_sector, a_stock_code, prefix, security_type):
        """Build one ListedCompanyItem from the generalization payload.

        ``prefix`` selects the per-security field group: ``'ag'`` for the
        A share, ``'bg'`` for the B share (agdm/agjc/agzgb vs bgdm/bgjc/...).
        ``board_secretary_name`` is left None; ``parse_detail`` fills it in.
        """
        abb = result[f'{prefix}jc'] or ''                     # 股票简称
        total_cap = self.to_decimal(result[f'{prefix}zgb'])   # 总股本（万股）
        out_cap = self.to_decimal(result[f'{prefix}ltgb'])    # 流通股本（万股）
        # Fix: guard the subtraction -- either field may be blank, and
        # ``None - None`` raised TypeError in the old per-share copies.
        if total_cap is not None and out_cap is not None:
            limited_cap = total_cap - out_cap                 # 有限售流通股（万股）
        else:
            limited_cap = None
        return ListedCompanyItem(**{
            'company_full_name': result['gsqc'],
            'company_eng_name': result['ywqc'],
            'stock_code': result[f'{prefix}dm'],
            'stock_abb': result[f'{prefix}jc'],
            'expand_stock_abb': result[f'{prefix}djc'],
            'stock_sector': stock_sector,
            'stock_state': '上市',
            'legal_name': None,
            'board_secretary_name': None,
            'registered_address': result['zcdz'],
            'cor_address': None,
            'zip_code': None,
            'email': None,
            'list_date': parse(result[f'{prefix}ssrq']).date(),
            'total_share_capital': total_cap,
            'limited_share_capital': limited_cap,
            'out_share_capital': out_cap,
            'telphone': None,
            'website': result['http'],  # 公司网址
            'listed_exchange': self.listed_exchange,
            # Both the A and B item use the A-share code here, as the original
            # did -- presumably the individual page is keyed by it; confirm.
            'source_url': f"https://www.szse.cn/certificate/individual/index.html?code={a_stock_code}",  # 来源网址
            'delisting_date': None,  # 退市日期
            'security_type': security_type,
            # Fix: the old B-share copy tested ``result['agjc']`` (copy/paste
            # error, TypeError for B-only companies); each security is now
            # flagged from its own abbreviation.
            'whet_not_st_company': '是' if 'ST' in abb else '否',
            'whet_not_delisting_organize_stocks': '是' if ('退市' in abb or abb.endswith('退')) else '否',
            'province': result['sheng'],
            'taxpayer_industry1': result['sshymc'],
            'taxpayer_industry2': None,
            'trade_type': None,  # 交易方式
            'data_newest_query_date': datetime.date.today()  # 数据最新查询时间
        })

    def parse_detail1(self, response, **kwargs):
        """Build the A/B items for one company, then request its roster."""
        result = response.json()['data']
        stock_sector = response.meta.get('stock_sector')
        a_stock_code = response.meta.get('a_stock_code')
        item_A = self._build_item(result, stock_sector, a_stock_code, 'ag', 'A股') if result['agdm'] else None
        item_B = self._build_item(result, stock_sector, a_stock_code, 'bg', 'B股') if result['bgdm'] else None

        url = "https://www.szse.cn/api/report/ShowReport/data"
        params = {
            "SHOWTYPE": "JSON",
            "CATALOGID": "1901",
            "txtDMorJC": a_stock_code,
            "loading": "first",
            "random": str(random.random())
        }
        yield response.follow(url + "?" + urlencode(params), callback=self.parse_detail,
                              cb_kwargs={'item_A': item_A, 'item_B': item_B})

    def parse_detail(self, response, **kwargs):
        """Fill in the board secretary (董事会秘书) and yield the finished items."""
        data = response.json()[0]["data"]
        board_secretary_name = None
        for person in data:
            if person['rzqk'] == '董事会秘书':
                # No break: the last matching entry wins, as in the original.
                board_secretary_name = person['xm']
        for item in (kwargs.get('item_A'), kwargs.get('item_B')):
            if item is not None:
                item['board_secretary_name'] = board_secretary_name
                yield item


if __name__ == "__main__":
    # Convenience entry point: launch this spider via the Scrapy CLI.
    from scrapy import cmdline

    cmdline.execute(["scrapy", "crawl", "szse"])
