
from crawler import SeleniumUtil
from selenium.webdriver.common.by import By
import time,datetime
from utils.logger import logger
from models.article import Article
from models.database import db
import uuid

class JgCrawler:
    """Selenium crawler for iweicha.com: punishment-case pages and yearly file pages.

    Each public method drives the shared webdriver through a year-by-year
    search, collects result links, fetches every linked page and persists it
    via the module-level ``save_to_db``. The driver is quit when the method
    finishes (success or failure), so one instance supports one crawl run.
    """

    def __init__(self):
        # Kept for interface compatibility; URLs are built per-request below.
        self.url = ''
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.87 Safari/537.36'
        }
        # self.webDriver = SeleniumUtil.get_webdriver_header()
        self.webDriver = SeleniumUtil.get_webdriver()

    def grap_jg_page(self, start_year: int = 2003, end_year: int = 2026,
                     resume_index: int = 0):
        """Crawl punishment-case articles ("处罚案例") and save them as type 1.

        Entry page:
        http://iweicha.com/smp/smp10.aspx?sys_code=MS599&def=bpc.xml&version=0 (最新)
        http://iweicha.com/smp/smp10.aspx?sys_code=MS599&def=fest.xml&version=0 (主页)

        :param start_year: first year to search, inclusive
        :param end_year: last year to search, exclusive (``range`` semantics)
        :param resume_index: number of already-processed links to skip per
            year, useful to resume after a crash. Replaces the hard-coded
            ``[1921:]`` slice the original code left behind, which silently
            dropped the first 1921 links of EVERY year.
        """
        try:
            for year in range(start_year, end_year):
                key_str = str(year) + '-'

                url = "http://iweicha.com/smp/smp10.aspx?sys_code=MS599&def=bpc.xml&fun_type=7&version=0"
                self.webDriver.get(url)

                time.sleep(3)
                self.webDriver.find_element(by=By.ID, value="txt_search").send_keys(key_str)

                time.sleep(1)
                self.webDriver.find_element(by=By.ID, value="btn_000-1_search").click()

                time.sleep(3)
                # Label text looks like "查询结果：5474"; jump straight to the
                # last page index so the table holds all remaining rows.
                self._jump_to_last_page("lbl_search_res")

                table = self.webDriver.find_element(
                    By.XPATH, '//*[@id="dir_list001-1"]/table/tbody')
                rows = table.find_elements(By.TAG_NAME, "tr")
                links_data = []
                for row in rows:
                    # fix: original passed row.text as a %-format arg to a
                    # message with no placeholder, breaking log formatting.
                    logger.info("row = %s", row.text)
                    try:
                        if row.text.strip() == '':
                            continue
                        a_element = row.find_element(by=By.TAG_NAME, value="a")
                        href = a_element.get_attribute("href")
                        text = a_element.text.strip()
                        # Link text format: "YYYY-MM-DD <title>" — split on
                        # fixed offsets as the original did.
                        row_year = int(text[:4])
                        realease_date = text[:10].strip()
                        title = text[11:].strip()
                        # Drop empty rows and rows from other years.
                        if href and text and row_year == year:
                            links_data.append({"href": href, "title": title,
                                               "year": row_year,
                                               "realease_date": realease_date})
                    except Exception as e:
                        logger.error(e)

                self._save_articles(links_data[resume_index:],
                                    (By.XPATH, '//*[@id="Panel3"]'),
                                    article_type=1)
                time.sleep(1)
            time.sleep(3)
        except Exception as e:
            logger.error(f"grap_jg_page: {e}")
        finally:
            # Always release the browser, on success or failure (the
            # original duplicated quit() in the try tail and the handler).
            self.webDriver.quit()

    def grap_jg_file_page(self, start_year: int = 2003, end_year: int = 2026):
        """Crawl yearly file pages and save them as type 2.

        File rows carry no release date in their link text, so
        ``realease_date`` is stored as ``None`` and ``year`` is the year
        that was searched for.

        :param start_year: first year to search, inclusive
        :param end_year: last year to search, exclusive
        """
        try:
            for year in range(start_year, end_year):
                key_str = str(year)
                url = "http://iweicha.com/smp/smp10.aspx?sys_code=MS599&def=ffs.xml&type=1&fun_type=1-003&version=0"
                self.webDriver.get(url)

                time.sleep(3)
                year_input = self.webDriver.find_element(by=By.ID, value='txt_file_year')
                year_input.clear()
                year_input.send_keys(key_str)

                time.sleep(1)
                self.webDriver.find_element(by=By.ID, value="btn_1-003_search").click()

                time.sleep(3)
                # NOTE(review): "lbl_serach_res" (sic) appears to be a typo in
                # the remote page's own element ID — confirm against the site
                # before "correcting" it here.
                self._jump_to_last_page("lbl_serach_res")

                table = self.webDriver.find_element(
                    By.XPATH, '//*[@id="tb_001"]/table/tbody')
                rows = table.find_elements(By.TAG_NAME, "tr")
                links_data = []
                for row in rows:
                    try:
                        if row.text.strip() == '':
                            continue
                        a_element = row.find_element(by=By.TAG_NAME, value="a")
                        href = a_element.get_attribute("href")
                        text = a_element.text.strip()
                        if href and text:  # drop empty rows
                            links_data.append({"href": href, "title": text,
                                               "year": year,
                                               "realease_date": None})
                    except Exception as e:
                        logger.error(e)

                self._save_articles(links_data, (By.ID, 'content'),
                                    article_type=2)
                time.sleep(1)
            time.sleep(3)
        except Exception as e:
            # fix: original logged this under the wrong method name.
            logger.error(f"grap_jg_file_page: {e}")
        finally:
            self.webDriver.quit()

    def _jump_to_last_page(self, result_label_id: str):
        """Read the result-count label and reload at the last page index.

        The label text is e.g. "查询结果：5474"; with 50 rows per page the
        target ``smp_index`` is ``count // 50``, substituted into the
        current URL's ``smp_index=0`` query parameter.

        :param result_label_id: DOM id of the result-count label
        """
        search_res = self.webDriver.find_element(by=By.ID, value=result_label_id).text
        logger.info(search_res)
        count = search_res.split("：")[-1]
        page_index = int(count) // 50
        current_url = self.webDriver.current_url.replace(
            "smp_index=0", "smp_index=" + str(page_index))
        logger.info(current_url)
        self.webDriver.get(current_url)
        time.sleep(6)

    def _save_articles(self, links_data, content_locator, article_type: int):
        """Visit each collected link, scrape its content element, persist it.

        :param links_data: list of dicts with at least ``href`` and ``title``
        :param content_locator: ``(By.<strategy>, value)`` pair for the
            element whose text/HTML becomes the article body
        :param article_type: stored as ``Article.type`` (1 = case, 2 = file)
        """
        by, value = content_locator
        for item in links_data:
            print(f"文本: {item['title']}\n链接: {item['href']}\n")
            self.webDriver.get(item['href'])
            time.sleep(2)
            element = self.webDriver.find_element(by, value)
            item["content"] = element.text
            full_html = element.get_attribute("outerHTML")
            logger.info(full_html)
            item["origin_content"] = full_html
            save_to_db(item, article_type)
            logger.info("保存数据库成功")

def save_to_db(data, type=1):
    """Persist one crawled item as an ``Article`` row.

    :param data: dict with keys ``title``, ``content``, ``origin_content``,
        ``year`` and ``realease_date`` (spelling kept to match the model)
    :param type: article category (1 = punishment case, 2 = file page).
        NOTE: the name shadows the ``type`` builtin; kept unchanged so
        existing callers — including keyword callers — keep working.
    :raises Exception: re-raises whatever the commit raised, after rolling
        the session back.
    """
    article = Article()
    # uuid1 is time/host based, matching the original id scheme.
    article.id = str(uuid.uuid1())
    article.title = data['title']
    article.content = data['content']
    article.add_time = datetime.datetime.now()
    article.type = type
    article.year = data['year']
    article.origin_content = data['origin_content']
    article.realease_date = data['realease_date']
    try:
        db.session.add(article)
        db.session.commit()
    except Exception:
        # A failed commit leaves the session dirty; roll back so later
        # calls on the shared session still work, then surface the error.
        db.session.rollback()
        raise

    