import asyncio
import logging
from random import randint
from time import localtime, sleep, strftime, time
from warnings import catch_warnings

import cryptography
import mysql.connector  # NOTE: mysql-connector-python lacks support for MySQL's caching_sha2_password auth plugin (it only supports mysql_native_password by default)
import pymysql
import requests
from bs4 import BeautifulSoup
from pymysql.err import MySQLError
from sqlalchemy.ext.asyncio import AsyncSession

from core.db import SessionLocal
from model.base_model import BaseModel
from model.quote import Quote

class BaseCrawler:
    """Crawler for quote pages on mingyan.supfree.net.

    Fetches paginated HTML, parses ``<table>`` rows into ``Quote`` objects and
    bulk-inserts them through an async SQLAlchemy session. Two crawl
    strategies are provided: a recursive one (``fetch_data``) and an iterative
    one (``fetch_data_loop``, used by ``run``).
    """

    # Every "page" of quotes is served by this single ASP endpoint; the page
    # number goes in the ``page`` query parameter.
    _URL = 'https://mingyan.supfree.net/panasonic.asp'

    # Browser-like headers (copied from a real Chrome session) so the site
    # serves the normal HTML page instead of blocking the request.
    _HEADERS = {
        'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
        'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8,de-DE;q=0.7,de;q=0.6,en-US;q=0.5',
        'cache-control': 'no-cache',
        'pragma': 'no-cache',
        'priority': 'u=0, i',
        'sec-ch-ua': '"Google Chrome";v="135", "Not-A.Brand";v="8", "Chromium";v="135"',
        'sec-ch-ua-mobile': '?0',
        'sec-ch-ua-platform': '"Windows"',
        'sec-fetch-dest': 'document',
        'sec-fetch-mode': 'navigate',
        'sec-fetch-site': 'none',
        'sec-fetch-user': '?1',
        'upgrade-insecure-requests': '1',
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.0.0 Safari/537.36',
        'Cookie': 'ASPSESSIONIDAARTTRRQ=FDONMOBCABFPKKLAPJOGPGPH'
    }

    # The quote column is VARCHAR(255); longer texts are skipped, not truncated.
    _MAX_TEXT_LEN = 255

    def __init__(self):
        # Consecutive-failure counter shared by both retry strategies.
        self.try_time = 0
        print("start base crawl")
        # Async SQLAlchemy session; opened in connect_db(), released in close_db().
        self.session: AsyncSession | None = None

    async def connect_db(self):
        """Open the async DB session and probe connectivity.

        Begins (and immediately releases) an empty transaction purely to
        verify the database is reachable before crawling starts.

        Raises:
            RuntimeError: if the session cannot be created or the probe fails.
        """
        try:
            self.session = SessionLocal()
            async with self.session.begin():
                pass
        except Exception as e:
            logging.error(f"数据库链接失败：{e}")
            # Chain the cause so the original DB error is not lost.
            raise RuntimeError("数据库链接失败，程序终止") from e

    async def close_db(self):
        """Release the DB session if one was opened."""
        if self.session:
            await self.session.close()

    def _parse_quotes(self, html: str) -> "list[Quote]":
        """Parse one page of HTML into Quote objects.

        Each ``<tr>`` holds two ``<td>`` cells: quote text and author. Texts
        longer than the DB column limit are skipped. Returns an empty list
        when no table rows are present.
        """
        soup = BeautifulSoup(html, 'html.parser')
        rows = soup.find('table').find_all('tr')
        quotes: list[Quote] = []
        for row in rows:
            cells = row.find_all('td')
            text = cells[0].text + " ——" + cells[1].text
            if len(text) <= self._MAX_TEXT_LEN:
                quotes.append(Quote(text=text))
        return quotes

    async def fetch_data(self, page_no):
        """Recursively crawl pages starting at *page_no*, saving each page.

        NOTE(review): recursion depth grows with the page count and with
        retries; Python's default limit (~1000) bounds how far this can walk.
        ``fetch_data_loop`` is the iterative alternative used by ``run``.
        """
        if self.try_time > 100:
            print("尝试重试次数过多")
            return
        url = self._URL
        print("start  crawl " + url + " the page of " + str(page_no))
        response = None
        try:
            # The request is made inside the try so network errors (whose
            # message mentions the host) are caught and retried below.
            response = requests.get(url, params={"page": page_no}, headers=self._HEADERS)
            # The site serves legacy Chinese encoding, not UTF-8.
            response.encoding = 'gb2312'
            if response.status_code == 200:
                quotes = self._parse_quotes(response.text)
                if quotes:
                    await self.save_data(quotes)
                    # Advance to the next page recursively.
                    await self.fetch_data(page_no + 1)
                else:
                    print("未找到<title>标签")
            else:
                print("请求失败，状态码：", response.status_code)
        except Exception as e:
            s = f"爬取数据异常：{e}"
            # Errors that mention the host are treated as transient network
            # failures: back off a random few seconds and retry this page.
            if "mingyan.supfree.net" in s:
                print("重新爬取 ", "")
                self.try_time += 1
                # Non-blocking sleep so the event loop keeps running.
                await asyncio.sleep(randint(3, 10))
                await self.fetch_data(page_no)
            else:
                print(s)
        finally:
            # Reset the retry counter once this attempt chain is done, and
            # close the response exactly once (it may be None if the GET failed).
            self.try_time = 0
            if response is not None:
                response.close()

    async def fetch_data_one(self, page_no) -> "tuple[bool, list[Quote]]":
        """Fetch and parse a single page of quotes.

        Returns:
            (has_next, quotes): ``has_next`` is True whenever the caller
            should continue; transient failures (network error, non-200
            status) yield ``(True, [])`` so the caller retries this page.

        Raises:
            RuntimeError: on unexpected (non-network) errors.
        """
        url = self._URL
        print("start  crawl " + url + " the page of " + str(page_no))
        response = None
        try:
            response = requests.get(url, params={"page": page_no}, headers=self._HEADERS)
            # Legacy Chinese encoding, not UTF-8.
            response.encoding = 'gb2312'
            if response.status_code != 200:
                # BUG FIX: this path used to fall through and return None,
                # crashing the caller's tuple unpack. Report and let the
                # caller retry the page.
                print("请求失败，状态码：", response.status_code)
                return True, []
            quotes = self._parse_quotes(response.text)
            if quotes:
                self.try_time = 0  # a successful page resets the failure counter
            else:
                print("页面异常")
            return True, quotes
        except Exception as e:
            s = f"爬取数据异常：{e}"
            if "mingyan.supfree.net" in s:
                # Network-level failure: signal "retry this page".
                print("待重新爬取 ", "")
                return True, []
            raise RuntimeError(f"⚠️ 抓取第 {page_no} 页失败: {s}") from e
        finally:
            print(f"执行结束finally 第{str(page_no)}页")
            if response is not None:
                response.close()

    async def fetch_data_loop(self):
        """Iteratively crawl pages until the site starts repeating content.

        The site serves its last page again for any higher page number, so
        two consecutive identical pages mean the crawl is complete. Gives up
        after more than 5 consecutive failures.
        """
        page_no = 1
        prev_signature = ""
        while True:
            try:
                has_next, quotes = await self.fetch_data_one(page_no)
                if not has_next:
                    print("✅ 所有页面抓取结束")
                    break
                if quotes:
                    # Concatenated texts identify the page; a repeat means
                    # we ran past the last real page.
                    signature = "".join(q.text for q in quotes)
                    if signature == prev_signature:
                        print("✅ 所有页面抓取结束")
                        break
                    prev_signature = signature
                    await self.save_data(quotes)
                    page_no += 1
                else:
                    # No data but not finished: something transient went
                    # wrong inside fetch_data_one. BUG FIX: this branch now
                    # honors the retry cap instead of looping forever.
                    self.try_time += 1
                    if self.try_time > 5:
                        print(f"❌ 连续失败 {self.try_time} 次，终止任务: 空页面")
                        break
                    await asyncio.sleep(randint(2, 6))  # back off before retrying
            except Exception as e:
                self.try_time += 1
                if self.try_time > 5:
                    print(f"❌ 连续失败 {self.try_time} 次，终止任务: {e}")
                    break
                print(f"⚠️ 抓取失败{e}，第 {page_no} 页将进行重试 ({self.try_time}/5)")
                await asyncio.sleep(randint(2, 6))  # back off before retrying

    async def save_data(self, data):
        """Bulk-insert quotes (insert-ignore) stamped with the current time.

        Over-length texts are filtered again defensively in case a caller
        bypassed the crawl-side length check.
        """
        now = int(time())
        rows = [
            {"text": item.text, "created_at": now, "updated_at": now}
            for item in data
            if len(item.text) <= self._MAX_TEXT_LEN
        ]
        await Quote.bulk_insert_ignore(self.session, rows)
        await self.session.commit()

    async def run(self):
        """Entry point: connect, crawl everything, always release the session."""
        # BUG FIX: nested double quotes inside a double-quoted f-string are a
        # SyntaxError before Python 3.12; use single quotes inside.
        print(f"执行定时任务于：{strftime('%Y-%m-%d %H:%M:%S', localtime())}")
        try:
            await self.connect_db()
            await self.fetch_data_loop()
        except Exception as e:
            logging.error(f"爬虫执行失败: {e}")
        finally:
            await self.close_db()