# -*- coding: utf-8 -*-
import datetime
import json
import re
import time

import scrapy
from dateutil.parser import parse

from apps.listed_company.listed_company.items import DelistingInformationSourceItem
from loguru import logger
from utils.tools import urlencode, urldecode


class SseDeSpider(scrapy.Spider):
    """Spider for delisted companies on the Shanghai Stock Exchange (SSE).

    Flow:
      1. ``start_requests``  - first list page per stock-type filter.
      2. ``parse_list``      - one detail request per company; on the first
         page it also fans out requests for every remaining page.
      3. ``parse_detail1``   - company profile -> one
         ``DelistingInformationSourceItem`` per listed share class (A / B).
    """

    listed_exchange = '上海证券交易所'  # "Shanghai Stock Exchange"
    name = 'sse_de'
    # Browser-captured session cookies; presumably required by
    # query.sse.com.cn — confirm whether bare requests are rejected.
    cookies = {
        "gdp_user_id": "gioenc-11e1g4b0%2C6e9a%2C523e%2Cc27g%2C7788beb49954",
        "ba17301551dcbaf9_gdp_session_id": "bbb44465-d254-4a30-b4f6-fe78430f790b",
        "ba17301551dcbaf9_gdp_session_id_sent": "bbb44465-d254-4a30-b4f6-fe78430f790b",
        "ba17301551dcbaf9_gdp_sequence_ids": "{%22globalKey%22:108%2C%22VISIT%22:3%2C%22PAGE%22:8%2C%22VIEW_CLICK%22:100}"
    }
    # Browser-like headers; the Referer points at www.sse.com.cn for the
    # same-site query endpoint.
    headers = {
        "Accept": "*/*",
        "Accept-Language": "zh,zh-TW;q=0.9,en-US;q=0.8,en;q=0.7,zh-CN;q=0.6",
        "Connection": "keep-alive",
        "Referer": "https://www.sse.com.cn/",
        "Sec-Fetch-Dest": "script",
        "Sec-Fetch-Mode": "no-cors",
        "Sec-Fetch-Site": "same-site",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36",
        "sec-ch-ua": "\"Not/A)Brand\";v=\"8\", \"Chromium\";v=\"126\", \"Google Chrome\";v=\"126\"",
        "sec-ch-ua-mobile": "?0",
        "sec-ch-ua-platform": "\"Windows\""
    }

    def start_requests(self):
        """Issue the first list-page request for each stock-type filter."""
        url = "https://query.sse.com.cn/commonQuery.do"
        for stock_type in [
            '1, 2',  # A + B shares.  NOTE(review): value contains a space — confirm the API accepts '1, 2' vs '1,2'
            '8',     # STAR Market (科创板)
        ]:
            params = {
                "jsonCallBack": "jsonpCallback60658443",
                "sqlId": "COMMON_SSE_CP_GPJCTPZ_GPLB_GP_L",
                "isPagination": "true",
                "STOCK_CODE": "",
                "REG_PROVINCE": "",
                "STOCK_TYPE": stock_type,
                "COMPANY_STATUS": "3",  # presumably 3 = delisted — verify against the SSE query API
                "type": "inParams",
                "pageHelp.cacheSize": "1",
                "pageHelp.beginPage": "1",
                "pageHelp.pageSize": "25",
                "pageHelp.pageNo": "1",
                "pageHelp.endPage": "1",
                "_": str(int(time.time() * 1000)),  # millisecond timestamp cache-buster
            }
            yield scrapy.Request(
                url + "?" + urlencode(params),
                callback=self.parse_list,
                cookies=self.cookies,
                headers=self.headers,
                meta={'params': params},
            )

    def parse_list(self, response, **kwargs):
        """Parse one page of the delisted-company list.

        Yields one detail request per company.  The first page of each stock
        type additionally schedules requests for pages 2..pageCount; those
        follow-up requests carry ``is_next=False`` so they do not fan out
        again, guaranteeing each page is fetched exactly once.
        """
        # Responses are JSONP: strip the jsonpCallbackNNN(...) wrapper.
        lines = json.loads(re.findall(r"jsonpCallback\d+\((.*)\)", response.text)[0])
        start_params = response.meta.get("params")
        stock_type = start_params['STOCK_TYPE']
        url = "https://query.sse.com.cn/commonQuery.do"

        for line in lines['result']:
            a_stock_code = line['A_STOCK_CODE']
            if a_stock_code == '-':
                # Pure B-share company: fall back to the B-share code.
                a_stock_code = line['B_STOCK_CODE']
            params = {
                "jsonCallBack": "jsonpCallback65420417",
                "isPagination": "false",
                "sqlId": "COMMON_SSE_CP_GPJCTPZ_GPLB_GPGK_GSGK_C",
                "COMPANY_CODE": a_stock_code,
                "_": str(int(time.time() * 1000)),
            }
            yield response.follow(
                url + "?" + urlencode(params),
                callback=self.parse_detail1,
                meta={'a_stock_code': a_stock_code, 'delist_date': line['DELIST_DATE']},
            )

        page_count = lines['pageHelp']['pageCount']
        if response.meta.get("is_next") is not False and page_count > 1:
            for page_num in range(2, int(page_count) + 1):
                # Copy the first-page params: the original mutated one shared
                # dict, so every scheduled request's meta aliased the same
                # (last-written) parameter set.
                params = dict(start_params)
                logger.info(f"{self.listed_exchange} {stock_type} 分页: {page_num} /{page_count}")
                params["pageHelp.beginPage"] = f"{page_num}"
                params["pageHelp.pageNo"] = f"{page_num}"
                params["pageHelp.endPage"] = f"{page_num}"
                params["_"] = str(int(time.time() * 1000))
                yield scrapy.Request(
                    url + "?" + urlencode(params),
                    callback=self.parse_list,
                    cookies=self.cookies,
                    headers=self.headers,
                    meta={'params': params, 'is_next': False},
                )

    def parse_detail1(self, response, **kwargs):
        """Parse a company-profile response into delisting item(s).

        A company may carry both an A-share and a B-share listing; one item
        is yielded per share class that has a real stock code.
        """
        data = json.loads(re.findall(r"jsonpCallback\d+\((.*)\)", response.text)[0])
        a_stock_code = response.meta.get('a_stock_code')
        delist_date = response.meta.get('delist_date')
        result = data['result'][0]

        if '主板' in result['SEC_TYPE']:
            stock_sector = '主板'
        elif '科创' in result['SEC_TYPE']:
            stock_sector = '科创板'
        else:
            # BUGFIX: the original left stock_sector unbound for any other
            # SEC_TYPE, raising NameError when building the items below.
            stock_sector = None

        if result['A_STOCK_CODE'] and result['A_STOCK_CODE'] != '-':
            yield self._build_item(
                result, a_stock_code, delist_date, stock_sector,
                code_key='A_STOCK_CODE', state_key='STATE_CODE_A_DESC',
                list_date_key='A_LIST_DATE', share_class='A股',
            )
        if result['B_STOCK_CODE'] and result['B_STOCK_CODE'] != '-':
            yield self._build_item(
                result, a_stock_code, delist_date, stock_sector,
                code_key='B_STOCK_CODE', state_key='STATE_CODE_B_DESC',
                list_date_key='B_LIST_DATE', share_class='B股',
            )

    def _build_item(self, result, a_stock_code, delist_date, stock_sector,
                    code_key, state_key, list_date_key, share_class):
        """Build one DelistingInformationSourceItem for a single share class.

        ``code_key`` / ``state_key`` / ``list_date_key`` select the A- or
        B-share columns of ``result``; ``share_class`` is the security-type
        label used when the security is not a CDR ('A股' or 'B股').
        """
        company_abbr = result['COMPANY_ABBR']
        return DelistingInformationSourceItem(**{
            'company_full_name': result['FULL_NAME'],
            'company_eng_name': result['FULL_NAME_EN'],
            'stock_code': result[code_key],
            'stock_abb': None,
            'expand_stock_abb': None,
            'stock_sector': stock_sector,
            'stock_state': result[state_key],
            'legal_name': result['LEGAL_REPRESENTATIVE'].strip(),
            'board_secretary_name': result['NAME'],
            'registered_address': result['REG_ADDRESS'],
            'cor_address': result['OFFICE_ADDRESS'],
            'zip_code': result['OFFICE_ZIP'],
            'email': result['E_MAIL_ADDRESS'],
            'list_date': parse(result[list_date_key]).date(),
            'total_share_capital': None,    # total share capital (10k shares) — not in this response
            'limited_share_capital': None,  # restricted tradable shares (10k shares) — not in this response
            'out_share_capital': None,      # tradable share capital (10k shares) — not in this response
            'telphone': None,
            'website': None,                # company website — not in this response
            'listed_exchange': self.listed_exchange,
            # NOTE: both the A and B items link to the A-share company page,
            # matching the original behaviour.
            'source_url': f"https://www.sse.com.cn/assortment/stock/list/info/company/index.shtml?COMPANY_CODE={a_stock_code}",
            'delisting_date': parse(delist_date).date(),
            'security_type': 'CDR' if "CDR" in result['SEC_TYPE'] else share_class,
            'whet_not_st_company': '是' if 'ST' in company_abbr else '否',
            'whet_not_delisting_organize_stocks': '是' if ('退市' in company_abbr or company_abbr.endswith('退')) else '否',
            'province': result['AREA_NAME'],
            'taxpayer_industry1': result['CSRC_CODE_DESC'],
            'taxpayer_industry2': result['CSRC_GREAT_CODE_DESC'],
            'trade_type': None,  # trading method — not in this response
            'data_newest_query_date': datetime.date.today(),  # date this record was scraped
        })


if __name__ == "__main__":
    # Allow running this spider directly from the command line.
    from scrapy import cmdline

    cmdline.execute(["scrapy", "crawl", "sse_de"])
