import json
import re
import time
import random

import scrapy
import requests
from CrawlAnnualReports.items import CrawlannualreportsItem
# from CrawlAnnualReports.CrawlAnnualReports.middlewares import CrawlannualreportsSpiderMiddleware

class ExampleSpider(scrapy.Spider):
    """Collect annual-report PDF links from cninfo.com.cn.

    NOTE(review): ``start_requests`` performs blocking ``requests.post``
    calls and yields items directly instead of scheduling Scrapy Requests,
    which bypasses Scrapy's async downloader. Presumably intentional (the
    abandoned ``parse``/``FormRequest`` path below suggests it was tried and
    dropped) — confirm against the project's middleware/pipeline setup.
    """

    name = "example"
    allowed_domains = ["www.cninfo.com.cn"]
    # POST endpoint of the historical-announcement query API.
    start_url = "http://www.cninfo.com.cn/new/hisAnnouncement/query"

    def gen_request_data(self, page_num, start_date, end_date):
        """Build the form payload for one page of the announcement query.

        Args:
            page_num: 1-based page index (int or numeric string).
            start_date: inclusive "YYYY-MM-DD" lower bound of the seDate filter.
            end_date: "YYYY-MM-DD" upper bound of the seDate filter.

        Returns:
            dict of form fields expected by the cninfo query endpoint.
        """
        page_num = int(page_num)
        data = {
            "pageNum": f"{page_num}",
            "pageSize": "30",
            "column": "szse",
            "tabName": "fulltext",
            "plate": "",
            "stock": "",
            "searchkey": "",
            "secid": "",
            # Category code for annual reports on the cninfo API.
            "category": "category_ndbg_szsh",
            "trade": "",
            "seDate": "{}~{}".format(start_date, end_date),
            "sortName": "",
            "sortType": "",
            "isHLtitle": "true",
        }
        return data

    def start_requests(self):
        """Page through the query API per date window and yield report items.

        Each tuple is (start_date, end_date, num) where ``num`` is a
        precomputed total for the window — presumably the announcement count,
        not the page count (TODO confirm). It is therefore treated only as a
        hard safety cap; the loop stops as soon as the API returns an empty
        ``announcements`` list, instead of blindly requesting ``num`` pages.
        """
        windows = [
            ("2001-01-01", "2004-01-01", 4634),
            ("2004-01-01", "2007-01-01", 8227),
            ("2007-01-01", "2010-01-01", 10547),
            ("2010-01-01", "2013-01-01", 13518),
            ("2013-01-01", "2016-01-01", 16939),
            ("2016-01-01", "2019-01-01", 21055),
            ("2019-01-01", "2022-01-01", 25539),
            ("2022-01-01", "2025-01-01", 32961),
        ]
        for start_date, end_date, num in windows:
            self.logger.info(f"{(start_date, end_date)}")
            k = 1
            while True:
                data = self.gen_request_data(k, start_date, end_date)
                # timeout= prevents the crawl from hanging forever on a
                # stalled connection (the original call had none).
                r = requests.post(url=self.start_url, data=data, timeout=30)
                # time.sleep(random.uniform(1,2))
                result = r.json()["announcements"]
                if not result:
                    # No more announcements in this window (API returns null
                    # or an empty list past the last page): stop paging early
                    # rather than issuing the remaining empty requests.
                    break
                for i in result:
                    # Skip summary/abstract documents ("摘要"); keep full reports.
                    if not re.search("摘要", i['announcementTitle']):
                        item = CrawlannualreportsItem()
                        # Strip '*' (ST-flagged stocks) to match parse() and
                        # keep the generated file name filesystem-safe.
                        item["title"] = i['announcementTitle'].replace('*', '')
                        item["sec_name"] = str(i['secName']).replace('*', '')
                        item["sec_code"] = str(i['secCode'])
                        item["adjunct_url"] = i['adjunctUrl']
                        item["down_url"] = 'http://static.cninfo.com.cn/' + item["adjunct_url"]
                        item["file_name"] = f'{item["sec_code"]}-{item["sec_name"]}-{item["title"]}.pdf'
                        yield item

                self.logger.info(f"Record: {start_date} {end_date} {k}")
                k += 1

                if k > num:
                    # Safety cap from the precomputed window totals.
                    break

    def parse(self, response, *args, **kwargs):
        """Parse a JSON query response into items (unused legacy callback).

        Raises:
            ValueError: when the HTTP status is not 200 or the window
                reports zero announcements.
        """
        self.logger.info("Response status: %s", response.status)
        self.logger.info("Response body: %s", response.text)

        # Guard clauses instead of the original nested if/else pyramid.
        if response.status != 200:
            self.logger.error("Form submission failed.")
            raise ValueError(f"form submission failed with status {response.status}")

        payload = response.json()
        if payload["totalAnnouncement"] <= 0:
            raise ValueError("totalAnnouncement = 0")

        self.logger.info("Form submitted successfully!")
        result = payload["announcements"]
        if result is not None:
            for i in result:
                # Same filtering/normalisation as start_requests().
                if not re.search("摘要", i['announcementTitle']):
                    item = CrawlannualreportsItem()
                    item["title"] = i['announcementTitle'].replace('*', '')
                    item["sec_name"] = str(i['secName']).replace('*', '')
                    item["sec_code"] = str(i['secCode'])
                    item["adjunct_url"] = i['adjunctUrl']
                    item["down_url"] = 'http://static.cninfo.com.cn/' + item["adjunct_url"]
                    item["file_name"] = f'{item["sec_code"]}-{item["sec_name"]}-{item["title"]}.pdf'
                    yield item

    def handle_error(self, failure):
        """Log a failed request; kept as errback for the legacy parse path."""
        self.logger.error("Request failed: %s", failure)