import scrapy
import json
import pandas as pd


class A2022Q1Spider(scrapy.Spider):
    """Scrape eastmoney.com quarterly report data (2022-03-31) page by page.

    Each page's rows are accumulated into ``global_df``; when the API stops
    returning a ``result`` payload, the accumulated frame is written to
    ``./json_data.xlsx`` and crawling ends.
    """

    name = '2022_q1'
    allowed_domains = ['eastmoney.com']
    # The endpoint returns JSONP (a jQuery callback wrapper around JSON);
    # the page number is spliced between these two URL fragments.
    start_url_p1 = "https://datacenter-web.eastmoney.com/api/data/v1/get?" \
                   "callback=jQuery112303505283648948614_1650951695106&sortColumns=" \
                   "UPDATE_DATE%2CSECURITY_CODE&sortTypes=-1%2C-1&pageSize=50&pageNumber="
    start_url_p2 = "&reportName=RPT_LICO_FN_CPD&columns=ALL&filter=(REPORTDATE%3D%272022-03-31%27)"
    page_num = 1
    start_urls = [start_url_p1 + str(page_num) + start_url_p2]
    global_df = pd.DataFrame()  # accumulator for all pages' rows

    def parse(self, response):
        """Strip the JSONP wrapper, append the page's rows to ``global_df``,
        and request the next page until the API reports no more data.

        :param response: Scrapy response for one paginated API request.
        :yields: a ``scrapy.Request`` for the next page, if any.
        """
        self.logger.info('Got successful response from %s', response.url)
        try:
            # Strip the JSONP callback wrapper: the JSON body spans from the
            # first '{' to the last '}' of the response text.
            r_txt = response.text
            beg = r_txt.find('{')
            end = r_txt.rfind('}')
            r_json = json.loads(r_txt[beg:end + 1])

            result = r_json.get('result')
            if result is None:
                # Paged past the last page: dump everything collected so far.
                self.logger.info("A2022Q1Spider got last page: %d", self.page_num - 1)
                self.global_df.to_excel('./json_data.xlsx')
                return

            data = pd.DataFrame(result.get('data'))
            if self.page_num == 1:
                self.global_df = data
            else:
                # DataFrame.append was removed in pandas 2.0; use pd.concat.
                self.global_df = pd.concat([self.global_df, data],
                                           ignore_index=True)

            self.page_num += 1
            url = self.start_url_p1 + str(self.page_num) + self.start_url_p2
            yield scrapy.Request(url, callback=self.parse)
        except Exception:
            # Best-effort: log the full traceback (the old code printed
            # e.args[0], which itself raises IndexError when args is empty)
            # and let the crawl continue with the pages gathered so far.
            self.logger.exception("A2022Q1Spider got exception")