import json
import re

from sam.util.dbUtil import DB, mysql_env_config_dict
from sam.util.fileUtil2 import write_file_quick, read_file_quick
from sam.util.httpUtil2 import BaseHttp, BaseBrowser
from sam.wrapper.CommonWrapper import catch_and_print_exception


class HeiMaoSearchUrl(BaseHttp):
    """Collects complaint detail URLs from the Sina HeiMao (black cat) search API.

    Each search-result page comes back as a JSONP payload; the jQuery wrapper
    is stripped, the JSON is parsed, and every complaint URL found is appended
    to a local file for a later detail-scraping pass.
    """

    def __init__(self):
        super().__init__()
        self.begin_date_str = "2020-08-01"
        # NOTE(review): despite the name, this attribute is used as the page
        # *number* inside the request URL. With the `<= 101` loop bound it
        # currently fetches page 101 only — presumably a manual resume point;
        # confirm before changing.
        self.current_page_size = 101
        self.header = {
            "User-Agent": self.user_agent,
            "Referer": "https://tousu.sina.com.cn/index/search/?keywords=%E7%89%A9%E4%B8%9A&t=0"
        }
        # JSONP envelope looks like: try{jQuery<digits>_<digits>( ...json... );}catch(e){};
        self.begin_re = re.compile(r"^try{jQuery\d+_\d+\(")
        self.end_re = re.compile(r"\);}catch\(e\){};$")

    def fetch_url_by_loop(self):
        """Walk the search pages, extract complaint URLs, append them to disk."""
        while self.current_page_size <= 101:
            current_page_url = f"https://tousu.sina.com.cn/api/index/s?callback=jQuery11120579342348777786_1629734154340&keywords=%E7%89%A9%E4%B8%9A&page_size=10&page={self.current_page_size}&_=1629734154368"
            response = self.get(current_page_url, resp_optional="text")
            self.current_page_size += 1
            if not response or response.status_code != 200:
                continue
            # Strip the JSONP padding so the remainder parses as plain JSON.
            body = self.begin_re.sub("", response.text)
            body = self.end_re.sub("", body)
            payload = json.loads(body)
            records = payload["result"]["data"]["lists"]
            # One single-column row per complaint URL (write_file_quick takes rows).
            url_rows = [[f'https:{record["main"]["url"]}'] for record in records]
            print(url_rows)
            write_file_quick(data_list=url_rows, export_file_name="物业投诉", optional="append")

    def search(self):
        """Public entry point; currently just runs the page loop."""
        self.fetch_url_by_loop()


class HeiMaoSearchDetail(BaseBrowser):
    """Scrapes individual HeiMao complaint detail pages and stores them in MySQL.

    URLs are read from the file produced by ``HeiMaoSearchUrl``; each page is
    loaded in the browser, parsed into a flat dict, and batch-inserted into
    the ``heimao_complaint_detail`` table.
    """

    def __init__(self):
        super().__init__()
        self.db = DB(mysql_env_config_dict.get("net"), level="debug")

    def __del__(self):
        # getattr guard: if DB(...) raised inside __init__, self.db was never
        # bound and a bare attribute access here would itself raise during GC.
        db = getattr(self, "db", None)
        if db:
            # NOTE(review): calling __del__ directly is unusual — if DB exposes
            # a close()/disconnect() method, prefer that.
            db.__del__()

    def parse_li_element(self, li_element):
        """Return the value text of one ``<li>`` field, or "" on parse failure."""
        element = self.parse_li_element_with_try(li_element)
        return element if element else ""

    @catch_and_print_exception
    def parse_li_element_with_try(self, li_element):
        # Each <li> renders as "label\nvalue"; the value is the second line.
        # The decorator swallows and prints any exception, yielding None.
        return li_element.text.split("\n")[1]

    def parse_response(self):
        """Extract one complaint record from the currently loaded detail page.

        Returns a dict with the complaint's sn, target, focus, appeal, amount,
        status, time, content and title. Raises IndexError if the page layout
        does not provide the expected elements.
        """
        item = {}
        li_elements = self.browser.find_elements_by_css_selector(".ts-q-list > li")
        # Fields in on-page order; explicit indexing preserves the original
        # IndexError behavior when the page yields fewer than six <li> items.
        field_names = (
            "complaint_sn",
            "complaint_to",
            "complaint_focus",
            "complaint_appeal",
            "complaint_amount",
            "complaint_status",
        )
        for index, field_name in enumerate(field_names):
            item[field_name] = self.parse_li_element(li_elements[index])
        span_elements = self.browser.find_elements_by_css_selector("span.u-date")
        # presumably the fourth date on the page is the complaint time — TODO confirm
        item["complaint_time"] = span_elements[3].text
        content_elements = self.browser.find_elements_by_css_selector("div.ts-d-cont > div")
        item["complaint_content"] = content_elements[-1].text
        title_element = self.browser.find_element_by_css_selector(" div.ts-d-question > h1")
        item["complaint_title"] = title_element.text
        return item

    def fetch_all_url_list(self):
        """Load every saved complaint URL, parse it, and insert into MySQL."""
        url_list = read_file_quick(file_type="csv")
        # For single-URL debugging, replace url_list with e.g.
        # ['https://tousu.sina.com.cn/complaint/view/17353006516/']
        for url in url_list:
            self.get(url)
            item = self.parse_response()
            if item:
                self.db.batch_update_by_item_list(
                    item_list=[item]
                    , table_name="heimao_complaint_detail"
                    , optional="BatchInsert"
                    , batch_size=1
                )


if __name__ == "__main__":
    # Entry point: crawl every previously collected complaint URL and persist
    # the parsed details. Kept as a named binding so the instance (and its DB
    # handle) lives until interpreter shutdown, matching the original lifetime.
    detail_spider = HeiMaoSearchDetail()
    detail_spider.fetch_all_url_list()
