#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2023/9/22 14:53
# @Author  : 王凯
# @File    : liaoning_grade.py
# @Project : spider-man
import datetime
import re

import scrapy

from apps.creadit_grade_a.creadit_grade_a.items import NetCreditGradeAItem
from utils.tools import urlencode, parse_url_params


class LiaoningGradeSpider(scrapy.Spider):
    """Scrape A-grade taxpayer credit ratings published by the Liaoning
    provincial tax bureau and yield one ``NetCreditGradeAItem`` per company.

    Flow: index page -> pick year options (last year and this year) ->
    request page 1 of each year's search results -> fan out to the
    remaining result pages -> parse table rows into items.
    """

    name = "liaoning_grade"
    province = "辽宁"
    url = "http://liaoning.chinatax.gov.cn/col/col6615/index.html"
    # Kept as a class attribute so tests/subclasses can swap the request class.
    Request = scrapy.Request
    # Search endpoint shared by every paginated query (was duplicated inline).
    search_url = "http://liaoning.chinatax.gov.cn/module/search/index.jsp"

    def _search_request(self, columnid, page, callback):
        """Build the Request for one result page of one year's column.

        :param columnid: value attribute of the year <option>, sent as i_columnid.
        :param page: 1-based result page number.
        :param callback: response callback to attach to the Request.
        """
        params = {
            "field": "sbh:1:1,mc:1:0,dqxx:1:0",
            "i_columnid": columnid,
            "sbh": "",
            "mc": "",
            "dqxx": "",
            "currpage": str(page),
        }
        return self.Request(self.search_url + "?" + urlencode(params), callback=callback)

    def start_requests(self):
        """Seed the crawl with the credit-grade column index page."""
        yield self.Request(self.url)

    def parse(self, response, **kwargs):
        """From the year <select>, request page 1 of the search results for
        last year and the current year."""
        current_year = datetime.datetime.now().year
        wanted_years = {str(y) for y in (current_year - 1, current_year)}
        for year_option in response.xpath('//select[@id="evalyear"]/option'):
            # Option text looks like "2023年度"; str.strip removes the
            # 年/度 characters from both ends, leaving the digits.
            label = year_option.xpath("string()").get().strip("年度")
            if label in wanted_years:
                columnid = year_option.xpath("./@value").get()
                yield self._search_request(columnid, 1, self.parse_page)

    def parse_page(self, response, **kwargs):
        """Extract items from the first result page, then request every
        remaining page of the same year's column."""
        _, request_data = parse_url_params(response.request.url)
        yield from self.parse_detail(response, **kwargs)
        # Total page count is embedded in the page text, e.g. "共 12 页".
        max_page_list = re.findall(r"共\D*(\d*)\D*页", response.text, flags=re.S)
        if max_page_list:
            for page in range(2, int(max_page_list[0]) + 1):
                yield self._search_request(
                    request_data["i_columnid"], page, self.parse_detail
                )

    def parse_detail(self, response, **kwargs):
        """Yield one item per result-table row that carries both a taxpayer
        id and a year (rows missing either are header/filler rows)."""
        rows = response.xpath('//div[@class="contentTable"]/table[2]/tbody/table/tr')
        for row in rows:
            company_name = row.xpath("./td[1]/text()").get()
            taxpayer_id = row.xpath("./td[2]/text()").get()
            year = row.xpath("./td[3]/text()").get()
            if taxpayer_id and year:
                item = NetCreditGradeAItem()
                item.taxpayer_id = taxpayer_id
                item.company_name = company_name
                item.year = year
                item.province = self.province
                yield item


def run():
    """Convenience entry point: launch this spider through Scrapy's CLI."""
    from scrapy import cmdline

    command = "scrapy crawl liaoning_grade"
    cmdline.execute(command.split())


if __name__ == "__main__":
    run()
