import os
from tkinter import Text
import concurrent.futures
import logging
import random
import re
import threading
import time
import pandas as pd
import requests
from bs4 import BeautifulSoup
import tkinter as tk
from tkinter import filedialog
import tldextract

# Configure the root logger: timestamped INFO-level output (format: time - name - level - message).
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')

# Module-level logger, used by BaiduScraper for proxy-pool bookkeeping.
logger = logging.getLogger(__name__)


class TkApp:
    """Tkinter front-end for the Sogou news scraper.

    Collects the keyword-file path, proxy API URL and timing settings from the
    user, then runs the scraper on a background thread while streaming its
    progress into a read-only text widget.

    NOTE(review): BaiduScraper reads the entry widgets through the module-level
    ``app`` global, so this class's attribute names (proxy_time, max_flag,
    then_wait, then_res, proxy_ip, file_path_entry) are part of its interface.
    """

    def __init__(self, master=None):
        self.master = master if master else tk.Tk()
        self.master.title('搜狗资讯')
        # Center a fixed-size window on the screen.
        screen_width = self.master.winfo_screenwidth()
        screen_height = self.master.winfo_screenheight()
        window_width = 1000
        window_height = 600
        position_top = int(screen_height / 2 - window_height / 2)
        position_right = int(screen_width / 2 - window_width / 2)
        self.master.geometry(f"{window_width}x{window_height}+{position_right}+{position_top}")

        self.setup_ui()
        self.bind_closing()

    def setup_ui(self):
        # Build the settings pane (left) and the output pane (right).
        self.create_left_frame()
        self.create_right_frame()

    def _labeled_entry(self, parent, label_text, default=None):
        # Helper: caption label plus a 50-char Entry, optionally pre-filled.
        tk.Label(parent, text=label_text).pack(pady=10)
        entry = tk.Entry(parent, width=50)
        entry.pack(pady=10)
        if default is not None:
            entry.insert(0, default)
        return entry

    def create_left_frame(self):
        """Build the settings pane: keyword-file picker plus scraper options."""
        left_frame = tk.Frame(self.master)
        left_frame.pack(side=tk.LEFT, fill=tk.Y, expand=True, padx=10, pady=10)

        self.file_path_entry = self._labeled_entry(left_frame, "请输入待处理关键词文本的路径：")

        file_select_button = tk.Button(left_frame, text='选择文件', command=self.select_file)
        file_select_button.pack(pady=10)

        # Proxy API endpoint that returns one "ip:port" per line.
        self.proxy_ip = self._labeled_entry(left_frame, "代理APi：")
        # Proxy pool refresh interval in seconds (default 120).
        self.proxy_time = self._labeled_entry(left_frame, "多少秒刷新一次代理IP：", "120")
        # Pause between successful pages, 0 disables the pause.
        self.then_wait = self._labeled_entry(left_frame, "随机等待(为0时不等待，填写整数，单位秒)：", "1")
        # Maximum retries per result page before skipping it.
        self.max_flag = self._labeled_entry(left_frame, "最大重试次数：", "10")
        # Per-request timeout in seconds.
        self.then_res = self._labeled_entry(left_frame, "请求超时时间(秒)：", "1")

        start_button = tk.Button(left_frame, text='开始', width=10, height=2, command=self.start_run_my_code)
        start_button.pack(padx=10)

    def create_right_frame(self):
        """Build the scrollable, read-only output pane."""
        right_frame = tk.Frame(self.master)
        right_frame.pack(side=tk.RIGHT, fill=tk.Y, expand=True, padx=10, pady=10)

        self.output_text = Text(right_frame, width=130, height=20)
        # 'red' tag is used for warning lines (retry-exhausted messages).
        self.output_text.tag_config("red", foreground="red")
        output_scroll = tk.Scrollbar(right_frame, command=self.output_text.yview)
        self.output_text.configure(yscrollcommand=output_scroll.set)
        self.output_text.pack(side=tk.LEFT, fill=tk.BOTH, expand=True)
        output_scroll.pack(side=tk.RIGHT, fill=tk.Y)

    def update_output(self, text, tag=""):
        """Append a line to the output widget and scroll to the bottom.

        NOTE(review): this is called from worker threads; Tkinter widget access
        is not guaranteed thread-safe — consider routing through `after()`.
        """
        self.output_text.config(state=tk.NORMAL)
        self.output_text.insert(tk.END, str(text), tag)
        self.output_text.insert(tk.END, "\n")  # newline carries no tag
        self.output_text.config(state=tk.DISABLED)
        self.output_text.see(tk.END)

    def run_my_code(self):
        """Worker-thread entry point: scrape every keyword from the chosen file."""
        ip_api_url = self.proxy_ip.get()
        scraper = BaiduScraper(ip_api_url)
        keywords = scraper.open_file_dialog(self.file_path_entry.get())
        scraper.refresh_ip_pool_timed()  # start the periodic proxy-pool refresh
        try:
            with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
                futures = [executor.submit(scraper.process_keyword, keyword) for keyword in keywords]
                concurrent.futures.wait(futures)
            print("所有任务已完成")
            self.update_output("所有任务已完成")
        finally:
            # Fix: cancel the refresh timer even when a task raises, otherwise
            # the Timer thread keeps re-arming itself forever.
            if scraper.timer:
                scraper.timer.cancel()

    def select_file(self):
        """Open a file chooser and drop the selected path into the entry."""
        file_path = filedialog.askopenfilename()
        if file_path:
            self.file_path_entry.delete(0, tk.END)
            self.file_path_entry.insert(0, file_path)

    def start_run_my_code(self):
        # daemon=True so an abandoned worker cannot keep the interpreter alive.
        thread = threading.Thread(target=self.run_my_code, daemon=True)
        thread.start()

    def bind_closing(self):
        self.master.protocol("WM_DELETE_WINDOW", self.on_closing)

    def on_closing(self):
        # Hard exit: immediately kills worker and Timer threads along with
        # the GUI (they have no cooperative shutdown mechanism).
        os._exit(0)

    def start_app(self):
        self.master.mainloop()


class BaiduScraper():
    """Scrapes Sogou web-search result pages for keywords via a rotating proxy pool.

    Results are appended to './主域名.txt' (domains) and exported per keyword
    to './查询结果/<keyword>.xlsx'.

    NOTE(review): reads its runtime configuration from the module-level ``app``
    (TkApp) global, so it can only be constructed after the GUI exists.
    """

    def __init__(self, ip_api_url):
        # Output directory for the per-keyword Excel files.
        if not os.path.exists('./查询结果'):
            os.makedirs('./查询结果')
        # User-entered settings pulled from the GUI entries.
        self.proxy_time = int(app.proxy_time.get())   # proxy-pool refresh interval (s)
        self.retry_count = int(app.max_flag.get())    # max retries per result page
        self.time_wait = int(app.then_wait.get())     # pause between pages (0 = none)
        self.then_res = int(app.then_res.get())       # per-request timeout (s)
        self.file_lock = threading.Lock()             # serializes domain-file writes
        self.ip_api_url = ip_api_url
        self.ip_pool = []
        self.ip_pool_lock = threading.Lock()
        self.last_refresh_time = time.time()
        self.timer = None
        self.user_agents = [
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36',
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:69.0) Gecko/20100101 Firefox/69.0',
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36 OPR/63.0.3368.43',
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.18362',
            'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; LCTE; rv:11.0) like Gecko',
            'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.25 Safari/537.36 Core/1.70.3722.400 QQBrowser/10.5.3739.400',
            'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36 QIHU 360SE',
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36 Edg/122.0.0.0'
        ]

    def get_real_url(self, v_url, headers, proxy):
        """Resolve a Sogou redirect path to the real destination URL.

        v_url: site-relative redirect path taken from a result page.
        Returns the real URL, or None when no redirect target can be found.
        """
        v_url = 'https://www.sogou.com' + v_url
        # Fix: honour the user-configured timeout instead of a hardcoded 1s.
        r = requests.get(v_url, headers=headers, proxies=proxy, allow_redirects=False, timeout=self.then_res)
        if r.status_code == 302:
            return r.headers.get("Location")
        # Fallback: meta-refresh style redirect embedded in the body.
        matches = re.findall("URL='(.*?)'", r.text)
        # Fix: avoid IndexError when no redirect target is present.
        return matches[0] if matches else None

    def get_sou_gou(self, keys, page, proxy):
        """Fetch and parse one Sogou result page for `keys`.

        Returns a list of (site name, url, registrable domain) tuples, or the
        sentinel string "None" on captcha / failure / empty page so the caller
        retries with a fresh proxy.
        """
        sogou_headers = {
            'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
            'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
            'priority': 'u=0, i',
            'referer': 'https://www.sogou.com/web',
            'sec-ch-ua': '"Microsoft Edge";v="125", "Chromium";v="125", "Not.A/Brand";v="24"',
            'sec-ch-ua-mobile': '?0',
            'sec-ch-ua-platform': '"Windows"',
            'sec-fetch-dest': 'document',
            'sec-fetch-mode': 'navigate',
            'sec-fetch-site': 'same-origin',
            'sec-fetch-user': '?1',
            'upgrade-insecure-requests': '1',
            'User-Agent': '{}'.format(random.choice(self.user_agents)),
        }
        sou_gou_result_data = []
        try:
            key = keys
            params = {
                'interation': '1728053249',
                'interV': '',
                'pid': 'sogou-wsse-8f646834ef1adefa',
                'query': f'{key}',
                'page': f'{page}',
                'ie': 'utf8',
                'p': '40230447',
                'dp': '1',
            }
            url = f'http://www.sogou.com/sogou'
            url_prn = f'https://www.sogou.com/sogou?query={key}&page={page}'
            print(url_prn)
            # Fix: use the user-configured timeout (was hardcoded to 1 second).
            sou_gou_response = requests.get(url, proxies=proxy, headers=sogou_headers, timeout=self.then_res, params=params)
            sou_gou_soup = BeautifulSoup(sou_gou_response.text, "html.parser")
            if "此验证码用于确认这些请求" in sou_gou_response.text:
                print('出现验证码---------------------尝试更换IP重试')
                return "None"
            all_div_list = sou_gou_soup.find('div', class_='results').findAll('div', class_='vrwrap')
            if all_div_list:
                for vrwrap_div in all_div_list:
                    # Any div carrying a data-url attribute holds a result link,
                    # regardless of its class name.
                    data_url_divs = vrwrap_div.find_all('div', attrs={'data-url': True})
                    for data_url_div in data_url_divs:
                        try:
                            try:
                                a_text = vrwrap_div.find('p', class_='news-from text-lightgray').find('span').text
                            except AttributeError:
                                # Alternate result layout carries the site name here.
                                a_text = vrwrap_div.find('div', class_='citeurl').find('span').text
                            a_href = data_url_div['data-url']
                            extracted = tldextract.extract(f'{a_href}')
                            main_a = extracted.domain + '.' + extracted.suffix
                            sou_gou_result_data.append((a_text, a_href, main_a))
                        except Exception:
                            # Best effort: skip entries missing the expected nodes.
                            pass
                app.update_output(sou_gou_result_data)
                print(sou_gou_result_data)
                return sou_gou_result_data
            # Fix: an empty result list used to fall through and return None
            # implicitly, which the caller's `!= "None"` check treated as
            # success and then crashed iterating it; signal retry explicitly.
            return "None"
        except Exception as e:
            print(e)
            return "None"

    def refresh_ip_pool(self):
        """Fetch a fresh proxy list from the configured API.

        Returns a list of requests-style proxy dicts ({'http': ..., 'https': ...});
        returns [] on any failure so the timer thread never dies on a network error.
        """
        try:
            response = requests.get(self.ip_api_url)
        except requests.RequestException as e:
            # Fix: an unguarded network error here used to kill the
            # refresh_ip_pool_timed Timer chain.
            logger.warning("proxy pool refresh failed: %s", e)
            return []
        if response.status_code == 200:
            ip_list = response.text.strip()  # drop the trailing newline
            proxy_lines = ip_list.split('\n')  # one "ip:port" per line
            # .strip() also removes any '\r' left by CRLF responses.
            proxy_list = [{'http': f'http://{ip.strip()}', 'https': f'http://{ip.strip()}'} for ip in proxy_lines]
            return proxy_list
        return []

    def refresh_ip_pool_timed(self):
        """Replace the proxy pool now and re-arm a Timer to do so periodically."""
        with self.ip_pool_lock:
            self.ip_pool = []  # discard leftover proxies
            self.ip_pool.extend(self.refresh_ip_pool())
            logger.info(f"代理池已刷新。当前代理池大小：{len(self.ip_pool)}")
            self.last_refresh_time = time.time()
        # elapsed_time is ~0 here (last_refresh_time was just set), so the
        # next trigger is effectively self.proxy_time seconds away.
        elapsed_time = time.time() - self.last_refresh_time
        next_interval = max(self.proxy_time - elapsed_time, 0)
        self.timer = threading.Timer(next_interval, self.refresh_ip_pool_timed)
        self.timer.start()

    def get_unique_ip(self):
        """Pop one proxy from the pool, refilling from the API when empty.

        Makes at most two refill attempts; returns a proxy dict, or None when
        the API yields nothing.
        """
        with self.ip_pool_lock:  # protect pool reads/writes across threads
            for _ in range(2):
                if self.ip_pool:
                    unique_ip = self.ip_pool.pop(0)
                    logger.info(len(self.ip_pool))
                    return unique_ip
                self.ip_pool = self.refresh_ip_pool()
            if self.ip_pool:
                return self.ip_pool.pop(0)
            print("Error: 无法获取新的代理IP")
            return None

    def save_domains_to_file(self, domains, filename):
        """Append domain name(s) to `filename`, one per line (thread-safe).

        domains: a list of strings or a single string.
        Raises TypeError for any other type.
        """
        with self.file_lock:  # only one thread appends at a time
            with open(filename, 'a', encoding='utf-8') as file:
                if isinstance(domains, list):
                    for domain in domains:
                        file.write(domain + '\n')
                elif isinstance(domains, str):
                    file.write(domains + '\n')
                else:
                    raise TypeError("domains must be a list of strings or a single string")

    def process_keyword(self, keyword):
        """Scrape up to 21 result pages for one keyword.

        Appends domains to './主域名.txt' as pages succeed and writes the full
        result table to './查询结果/<keyword>.xlsx' at the end.
        """
        proxy = self.get_unique_ip()
        bd_df = pd.DataFrame(columns=['中文网址名称', '网址链接', '主域名'])
        for i in range(21):
            retry_count = self.retry_count
            domains = []  # registrable domains found on this page
            flag = 0  # retries consumed on this page
            while retry_count > 0:
                try:
                    bd_result_data = self.get_sou_gou(keyword, page=str(i + 1), proxy=proxy)
                    if bd_result_data != "None":
                        df_temp = pd.DataFrame(bd_result_data, columns=['中文网址名称', '网址链接', '主域名'])
                        bd_df = pd.concat([bd_df, df_temp])
                        for data in bd_result_data:
                            if len(data) >= 3 and data[2]:  # keep only non-empty domains
                                domains.append(data[2])
                        self.save_domains_to_file(domains, './主域名.txt')
                        if self.time_wait != 0:
                            time.sleep(self.time_wait)
                        break
                    else:
                        # Captcha or failed request: rotate the proxy and retry.
                        proxy = self.get_unique_ip()
                        flag = flag + 1
                        retry_count -= 1
                except Exception as e:
                    print(f"未知异常: {e}")
                    break
            else:
                # while exhausted without break: give up on this page.
                print(f"当前关键词==={keyword}===第{i}页已经重试{flag}次，准备跳过")
                app.update_output(f"当前关键词==={keyword}===第{i}页已经重试{flag}次，准备跳过", "red")
                continue
        # Fix: sanitize every filesystem-invalid character (was only ':').
        safe_keyword = re.sub(r'[\\/:*?"<>|]', '_', keyword)
        bd_df.to_excel(f'./查询结果/{safe_keyword}.xlsx', index=False)

    @staticmethod
    def open_file_dialog(file_path):
        """Read the keyword file and return its lines as a list.

        Returns [] when `file_path` is empty/falsy.
        """
        if file_path:
            with open(file_path, 'r', encoding='utf-8') as file:
                content = file.read().splitlines()

            print("文本文件内容:")
            print(content)
            return content
        else:
            print("未选择文件")
            return []


if __name__ == "__main__":
    # NOTE: `app` must remain a module-level global — BaiduScraper reads the
    # GUI entry widgets through it (app.proxy_time, app.max_flag, etc.), so
    # this must not be wrapped into a main() function.
    app = TkApp()
    app.start_app()
