# -*- coding: utf-8 -*-
import datetime
import hashlib
import json
import re
import time

import requests
import scrapy
from dateutil.parser import parse
from dateutil.relativedelta import relativedelta

from apps.listed_company.listed_company.items import ListedCompanyItem, NetListedCompanyYearReportItem, NetListedCompanyValuationDataDetailsItem, NetAdministrativePenaltiesItem
from loguru import logger

from components.config import NET_ROBOT_MYSQL_CONFIG
from utils.db.mysqldb import MysqlDB
from utils.time_tools import get_random_date_list
from utils.tools import urlencode, urldecode
import oss2


class XzcfCsrcSpider(scrapy.Spider):
    """Scrape administrative penalty announcements from the CSRC
    (中国证券监督管理委员会) JSON search endpoint.

    Yields one ``NetAdministrativePenaltiesItem`` per announcement.
    Pagination beyond the first page is currently disabled — see the
    commented-out block at the end of :meth:`parse_list`.
    """

    listed_exchange = '中国证券监督管理委员会'
    name = 'xzcf_csrc'
    # Browser-like XHR headers; the Referer is the public search page that
    # backs the JSON endpoint.
    headers = {
        "Accept": "*/*",
        "Accept-Language": "zh,zh-TW;q=0.9,en-US;q=0.8,en;q=0.7,zh-CN;q=0.6",
        "Proxy-Connection": "keep-alive",
        "Referer": "http://www.csrc.gov.cn/csrc/c101971/zfxxgk_zdgk.shtml?channelid=17d5ff2fe43e488dba825807ae40d63f",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36",
        "X-Requested-With": "XMLHttpRequest"
    }
    # Cookies captured from a browser session — presumably required by the
    # site's anti-bot/analytics layer (acw_tc, _yfxkpy_*). NOTE(review):
    # hard-coded session cookies tend to expire; confirm they are still valid.
    cookies = {
        "_gscu_516223281": "25343910n01mtd17",
        "_gscbrs_516223281": "1",
        "acw_tc": "1a0c65da17284551185465164e013483d83d0f48e304da052ccf56cd15e769",
        "_yfxkpy_ssid_10008998": "%7B%22_yfxkpy_firsttime%22%3A%221725343911602%22%2C%22_yfxkpy_lasttime%22%3A%221728439104978%22%2C%22_yfxkpy_visittime%22%3A%221728455117150%22%2C%22_yfxkpy_cookie%22%3A%2220240903141151603312846130532850%22%2C%22_yfxkpy_returncount%22%3A%221%22%7D",
        "_gscs_516223281": "t284528718wbimo19|pv:12"
    }

    custom_settings = {
        # "HTTPERROR_ALLOWED_CODES": [304, 200],
    }

    # Decision numbers ("……号") sit at the very start of the announcement
    # body; without re.MULTILINE the ^ anchor can only match at position 0,
    # so there is at most one match. Compiled once instead of re-scanning
    # with re.findall on every item.
    _decision_no_re = re.compile(r'^(.*?号)')

    def start_requests(self):
        """Issue the initial search-list request (page 1, 10 results)."""
        url = "http://www.csrc.gov.cn/searchList/17d5ff2fe43e488dba825807ae40d63f"
        params = {
            "_isAgg": "true",
            "_isJson": "true",
            "_pageSize": "10",
            "_template": "index",
            "_rangeTimeGte": "",
            "_channelName": "",
            "page": "1"
        }
        # params travel through meta so the (currently disabled) pagination
        # code in parse_list can reuse them for follow-up pages.
        yield scrapy.Request(
            url + "?" + urlencode(params),
            callback=self.parse_list,
            cookies=self.cookies,
            headers=self.headers,
            meta={'params': params},
        )

    def parse_list(self, response, **kwargs):
        """Parse one JSON listing page and yield one item per result.

        Only the first page is processed while the pagination block below
        stays commented out.
        """
        response_json = response.json()
        # Total result count — only needed by the disabled pagination below.
        total = response_json['data']['total']
        for line in response_json['data']['results']:
            item = NetAdministrativePenaltiesItem(**{
                'title': line['title'],
                # "YYYY-MM-DD HH:MM:SS" -> keep only the date part.
                'published_date': line['publishedTimeStr'].split()[0],
                'url': line['url'],
                'content': {'content': line['content']},
                'decision_no': None,
                'decision_uuid': line['manuscriptId'],
            })
            match = self._decision_no_re.search(line['content'])
            if match:
                item['decision_no'] = match.group(1)
            yield item
        # Pagination intentionally disabled; re-enable to crawl past page 1.
        # if response.meta.get("is_next") is not False:
        #     if total > 10:
        #         for page_num in range(2, (total // 10) + 2):
        #             url = "http://www.csrc.gov.cn/searchList/17d5ff2fe43e488dba825807ae40d63f"
        #             params = response.meta.get("params")
        #             logger.info(f"{self.listed_exchange}  分页: {page_num} /{total // 10}")
        #             params["page"] = f"{page_num}"
        #             yield scrapy.Request(url + "?" + urlencode(params), callback=self.parse_list, cookies=self.cookies, headers=self.headers, meta={'params': params, 'is_next': False})


if __name__ == "__main__":
    # Allow running the spider directly with `python <this file>`.
    from scrapy import cmdline

    cmdline.execute(["scrapy", "crawl", "xzcf_csrc"])
