import calendar
import json
import math
import re

import redis
import scrapy


class CourtSpider(scrapy.Spider):
    """Seed spider for judgment documents (裁判文书) on bjcourt.gov.cn.

    The site only exposes the first 200 hits (10 pages x 20 results) of any
    search, so each callback checks the hit count of its slice and either
    queues page-by-page fetch tasks into a Redis list (when the slice fits
    under the cap) or narrows the query by one more dimension and issues
    new requests:

        year -> court -> month -> day -> case type (ajlb) -> keyword

    The queued JSON tasks mirror the search form fields so a downstream
    worker can replay them directly as POST formdata.
    """

    name = "court"

    # Hit-count banner, e.g. 您搜到了<em>1234</em>条符合条件的文书</span>.
    # Raw string so \d is a real regex escape (non-raw '\d' warns on
    # modern Python); compiled once instead of in every callback.
    COUNT_PATTERN = re.compile(r"您搜到了<em>(\d+)</em>条符合条件的文书</span>")

    PAGE_SIZE = 20                   # results per page on the site
    MAX_VISIBLE = 200                # deepest reachable hit: 10 pages x 20
    TASK_KEY = "bjcourt_task_list"   # Redis list consumed by the fetch worker

    def __init__(self, *args, **kwargs):
        # Pass through *args/**kwargs so Scrapy can inject -a spider args.
        super().__init__(*args, **kwargs)
        self.url = "https://www.bjcourt.gov.cn/cpws/index.htm"
        # NOTE(review): assumes Redis on localhost:6379 — confirm for deployment.
        self.redis_conn = redis.Redis(db=1)
        # Running total of documents counted across all year-level searches.
        self.total = 0
        # Beijing courts and the site's internal jbfyId for each.
        self.court_list = [{'court_name': '北京市高级人民法院', 'court_short_name': '北京高院', 'court_id': '1'},
                           {'court_name': '北京市第一中级人民法院', 'court_short_name': '一中院', 'court_id': '2'},
                           {'court_name': '北京市第二中级人民法院', 'court_short_name': '二中院', 'court_id': '12'},
                           {'court_name': '北京市第三中级人民法院', 'court_short_name': '三中院', 'court_id': '29'},
                           {'court_name': '北京市第四中级人民法院', 'court_short_name': '四中院', 'court_id': '22'},
                           {'court_name': '北京知识产权法院', 'court_short_name': '知产', 'court_id': '30'},
                           {'court_name': '北京金融法院', 'court_short_name': '金融院', 'court_id': '31'},
                           {'court_name': '北京市东城区人民法院', 'court_short_name': '东城法院', 'court_id': '13'},
                           {'court_name': '北京市西城区人民法院', 'court_short_name': '西城法院', 'court_id': '3'},
                           {'court_name': '北京市朝阳区人民法院', 'court_short_name': '朝阳法院', 'court_id': '15'},
                           {'court_name': '北京市海淀区人民法院', 'court_short_name': '海淀法院', 'court_id': '6'},
                           {'court_name': '北京市丰台区人民法院', 'court_short_name': '丰台法院', 'court_id': '16'},
                           {'court_name': '北京市石景山区人民法院', 'court_short_name': '石景山法院', 'court_id': '5'},
                           {'court_name': '北京市门头沟区人民法院', 'court_short_name': '门头沟法院', 'court_id': '7'},
                           {'court_name': '北京市房山区人民法院', 'court_short_name': '房山法院', 'court_id': '8'},
                           {'court_name': '北京市通州区人民法院', 'court_short_name': '通州法院', 'court_id': '18'},
                           {'court_name': '北京市大兴区人民法院', 'court_short_name': '大兴法院', 'court_id': '10'},
                           {'court_name': '北京市顺义区人民法院', 'court_short_name': '顺义法院', 'court_id': '17'},
                           {'court_name': '北京市昌平区人民法院', 'court_short_name': '昌平法院', 'court_id': '9'},
                           {'court_name': '北京市怀柔区人民法院', 'court_short_name': '怀柔法院', 'court_id': '20'},
                           {'court_name': '北京市平谷区人民法院', 'court_short_name': '平谷法院', 'court_id': '19'},
                           {'court_name': '北京市密云区人民法院', 'court_short_name': '密云法院', 'court_id': '21'},
                           {'court_name': '北京市延庆区人民法院', 'court_short_name': '延庆法院', 'court_id': '11'},
                           {'court_name': '北京铁路运输法院', 'court_short_name': '铁路法院', 'court_id': '23'},
                           {'court_name': '北京互联网法院', 'court_short_name': '互联网院', 'court_id': '32'}]

    def _writ_count(self, response):
        """Parse the hit count from a result page; return None when the
        banner is missing (e.g. an error or block page)."""
        match = self.COUNT_PATTERN.search(response.text)
        return int(match.group(1)) if match else None

    def _enqueue_pages(self, data, pages):
        """Push one Redis task per result page.

        'page' is always serialized as a string (the original code mixed
        int and str) so the queued JSON can be replayed verbatim as
        FormRequest formdata, which requires string values.
        """
        for page in range(1, pages + 1):
            task = dict(data, page=str(page))
            self.redis_conn.rpush(self.TASK_KEY, json.dumps(task, ensure_ascii=False))

    def start_requests(self):
        """Issue one whole-year search per year; parse1 decides whether
        the year slice must be narrowed further."""
        for year in range(2011, 2024 + 1):
            data = {
                "jbfyId": "",
                "startCprq": f"{year}-01-01",
                "endCprq": f"{year}-12-31",
                "page": "1",
            }
            yield scrapy.FormRequest(
                url=self.url, formdata=data, callback=self.parse1, meta={"year": year})

    def parse1(self, response):
        """Year level: queue page tasks if under the cap, else split by court."""
        year = response.meta["year"]
        writ_count = self._writ_count(response)
        if writ_count is None:
            return
        self.total += writ_count
        if writ_count <= self.MAX_VISIBLE:
            data = {
                "jbfyId": "",
                "startCprq": f"{year}-01-01",
                "endCprq": f"{year}-12-31",
            }
            self._enqueue_pages(data, math.ceil(writ_count / self.PAGE_SIZE))
        else:
            for court in self.court_list:
                court_id = court["court_id"]
                data = {
                    "jbfyId": court_id,
                    "startCprq": f"{year}-01-01",
                    "endCprq": f"{year}-12-31",
                    "page": "1",
                }
                yield scrapy.FormRequest(
                    url=self.url, formdata=data, callback=self.parse2,
                    meta={"year": year, "court_short_name": court["court_short_name"],
                          "court_id": court_id})

    def parse2(self, response):
        """Year+court level: queue page tasks if under the cap, else split by month."""
        year = response.meta["year"]
        court_short_name = response.meta["court_short_name"]
        court_id = response.meta["court_id"]
        writ_count = self._writ_count(response)
        if writ_count is None:
            return
        if writ_count <= self.MAX_VISIBLE:
            data = {
                "jbfyId": court_id,
                "startCprq": f"{year}-01-01",
                "endCprq": f"{year}-12-31",
            }
            self._enqueue_pages(data, math.ceil(writ_count / self.PAGE_SIZE))
        else:
            for month in range(1, 13):
                month_num = f"{month:02d}"
                # calendar.monthrange gives the true last day of the month,
                # including Feb 29 in leap years -- the previous hard-coded
                # (month, last-day) table pinned February to 28 and lost
                # every document dated Feb 29 of 2012/2016/2020/2024.
                day_num = f"{calendar.monthrange(year, month)[1]:02d}"
                data = {
                    "jbfyId": court_id,
                    "startCprq": f"{year}-{month_num}-01",
                    "endCprq": f"{year}-{month_num}-{day_num}",
                    "page": "1",
                }
                yield scrapy.FormRequest(
                    url=self.url, formdata=data, callback=self.parse3,
                    meta={"year": year, "court_short_name": court_short_name,
                          "court_id": court_id, "month_num": month_num,
                          "day_num": day_num})

    def parse3(self, response):
        """Year+court+month level: queue page tasks if under the cap, else split by day."""
        year = response.meta["year"]
        court_short_name = response.meta["court_short_name"]
        court_id = response.meta["court_id"]
        month_num = response.meta["month_num"]
        day_num = response.meta["day_num"]
        writ_count = self._writ_count(response)
        if writ_count is None:
            return
        if writ_count <= self.MAX_VISIBLE:
            data = {
                "jbfyId": court_id,
                "startCprq": f"{year}-{month_num}-01",
                "endCprq": f"{year}-{month_num}-{day_num}",
            }
            self._enqueue_pages(data, math.ceil(writ_count / self.PAGE_SIZE))
        else:
            for day in range(1, int(day_num) + 1):
                day_str = f"{day:02d}"
                date_str = f"{year}-{month_num}-{day_str}"
                data = {
                    "jbfyId": court_id,
                    "startCprq": date_str,
                    "endCprq": date_str,
                    "page": "1",
                }
                yield scrapy.FormRequest(
                    url=self.url, formdata=data, callback=self.parse4,
                    meta={"year": year, "court_short_name": court_short_name,
                          "court_id": court_id, "month_num": month_num,
                          "day": day_str})

    def parse4(self, response):
        """Single-day level: queue page tasks if under the cap, else split by case type."""
        year = response.meta["year"]
        court_short_name = response.meta["court_short_name"]
        court_id = response.meta["court_id"]
        month_num = response.meta["month_num"]
        day = response.meta["day"]
        date_str = f"{year}-{month_num}-{day}"
        writ_count = self._writ_count(response)
        if writ_count is None:
            return
        if writ_count <= self.MAX_VISIBLE:
            data = {
                "jbfyId": court_id,
                "startCprq": date_str,
                "endCprq": date_str,
            }
            self._enqueue_pages(data, math.ceil(writ_count / self.PAGE_SIZE))
        else:
            # ajlb = the site's case-type filter.
            # NOTE(review): 知识产权 maps to an empty ajlb id -- an empty
            # filter usually means "all types"; confirm against the site.
            writ_type_list = [
                {"writ_type": "民事", "writ_type_id": "3"},
                {"writ_type": "刑事", "writ_type_id": "2"},
                {"writ_type": "行政", "writ_type_id": "4"},
                {"writ_type": "执行", "writ_type_id": "10"},
                {"writ_type": "赔偿", "writ_type_id": "5"},
                {"writ_type": "知识产权", "writ_type_id": ""},
            ]
            for item in writ_type_list:
                data = {
                    "jbfyId": court_id,
                    "startCprq": date_str,
                    "endCprq": date_str,
                    "ajlb": item["writ_type_id"],
                    "page": "1",
                }
                yield scrapy.FormRequest(
                    url=self.url, formdata=data, callback=self.parse5,
                    meta={"court_short_name": court_short_name, "court_id": court_id,
                          "date_str": date_str, "writ_type": item["writ_type"],
                          "writ_type_id": item["writ_type_id"]})

    def parse5(self, response):
        """Day+case-type level: queue page tasks if under the cap, else split by keyword."""
        court_short_name = response.meta["court_short_name"]
        court_id = response.meta["court_id"]
        date_str = response.meta["date_str"]
        writ_type = response.meta["writ_type"]
        writ_type_id = response.meta["writ_type_id"]
        writ_count = self._writ_count(response)
        if writ_count is None:
            return
        if writ_count <= self.MAX_VISIBLE:
            data = {
                "ajlb": writ_type_id,
                "jbfyId": court_id,
                "startCprq": date_str,
                "endCprq": date_str,
            }
            self._enqueue_pages(data, math.ceil(writ_count / self.PAGE_SIZE))
        else:
            # High-volume civil case categories used as the final split.
            keyword_list = ["离婚", "机动车交通事故责任纠纷", "供热供暖", "物业纠纷", "劳动争议",
                            "买卖合同", "民间借贷", "信用卡纠纷", "金融借款", "银行借款"]
            for keyword in keyword_list:
                data = {
                    "jbfyId": court_id,
                    "startCprq": date_str,
                    "endCprq": date_str,
                    "ajlb": writ_type_id,
                    "prompt": keyword,
                    "page": "1",
                }
                yield scrapy.FormRequest(
                    url=self.url, formdata=data, callback=self.parse6,
                    meta={"court_short_name": court_short_name, "court_id": court_id,
                          "date_str": date_str, "writ_type": writ_type,
                          "writ_type_id": writ_type_id, "keyword": keyword})

    def parse6(self, response):
        """Keyword level -- the last narrowing step.

        There is nothing finer to split by, so queue up to 20 pages
        regardless: for <= 400 hits that covers everything requested,
        and beyond that the first 20 pages are all the site will serve
        anyway (equivalent to the original's 400-hit branch pair).
        """
        court_id = response.meta["court_id"]
        date_str = response.meta["date_str"]
        writ_type_id = response.meta["writ_type_id"]
        keyword = response.meta["keyword"]
        writ_count = self._writ_count(response)
        if writ_count is None:
            return
        data = {
            "prompt": keyword,
            "ajlb": writ_type_id,
            "jbfyId": court_id,
            "startCprq": date_str,
            "endCprq": date_str,
        }
        self._enqueue_pages(data, min(math.ceil(writ_count / self.PAGE_SIZE), 20))
