import random
import tkinter as tk

from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from tkinter import ttk
import akshare as ak
import threading
import time
import requests
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from fake_useragent import UserAgent
import re

from selenium.webdriver.support.wait import WebDriverWait


# Resource-path helper for stock_monitor.py (PyInstaller one-file support)
import sys
import os

def resource_path(relative_path):
    """Return the absolute path of a bundled resource file.

    Under a PyInstaller one-file build, resources are unpacked into a
    temporary directory exposed as ``sys._MEIPASS``; when running from
    source that attribute is absent and we fall back to the absolute
    path of the parent directory.
    """
    base_path = getattr(sys, "_MEIPASS", None)
    if base_path is None:
        # Not frozen: resolve relative to the parent of the working dir.
        base_path = os.path.abspath("..")
    return os.path.join(base_path, relative_path)

# Example usage: resolve the window icon relative to the app's base directory.
icon_path = resource_path("../assets/icon.ico")

class StockMonitor:
    """A-share real-time sentiment monitor.

    Renders a single-row, horizontally scrollable ticker of market
    metrics in a Tk window and refreshes it every 10 seconds from a
    background daemon thread (data fetched via akshare; GUI updates are
    marshalled back to the Tk thread with ``root.after``).
    """

    def __init__(self, root):
        self.root = root
        root.title("A股实时情绪指标监控工具(云梦路小贤开发内部专用)")
        root.configure(bg='red')
        root.geometry("1350x50")

        # Horizontally scrollable container that hosts the metric labels.
        self.container = tk.Canvas(root, bg='red', highlightthickness=0)
        self.scrollbar = ttk.Scrollbar(root, orient="horizontal", command=self.container.xview)
        self.scroll_frame = tk.Frame(self.container, bg='red')

        self.container.configure(xscrollcommand=self.scrollbar.set)
        self.container.pack(side="top", fill="x", expand=True)
        self.scrollbar.pack(side="bottom", fill="x")

        self.container.create_window((0, 0), window=self.scroll_frame, anchor="nw")

        # One label per metric, keyed by the (Chinese) column name.
        self.labels = {}
        columns = [
            '上证指数', '创业板指', '上涨家数', '下跌家数',
            '成交量', '涨停家数', '跌停家数', '封板率',
            'A50期指', '昨首板', '昨连板'
        ]

        for col in columns:
            frame = tk.Frame(self.scroll_frame, bg='red', padx=5)
            label = tk.Label(frame, text=f"{col}: --", font=('微软雅黑', 12),
                             fg='white', bg='red')
            label.pack()
            frame.pack(side='left', fill='x')
            self.labels[col] = label

        # Size the scroll region to the rendered labels.
        self.scroll_frame.update_idletasks()
        self.container.config(scrollregion=self.container.bbox("all"))

        # Fetch once up front, then keep refreshing in the background.
        self.update_data()
        self.schedule_update()

    def schedule_update(self):
        """Start the daemon thread that drives the periodic refresh."""
        threading.Thread(target=self.periodic_update, daemon=True).start()

    def periodic_update(self):
        """Background loop: refresh the data every 10 seconds until exit."""
        while True:
            self.update_data()
            time.sleep(10)  # refresh interval (seconds)

    def safe_extract(self, content: str) -> int:
        """Extract the first integer found in *content*, ignoring
        thousands separators; return 0 on any parsing failure.
        """
        try:
            pattern = r"\d+"
            match = re.search(pattern, content.replace(",", ""))
            return int(match.group()) if match else 0
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # still propagate; any data problem (e.g. non-str input) yields 0.
        except Exception:
            return 0

    def update_data(self):
        """Fetch index quotes via akshare and push them to the GUI thread.

        Network/parse failures are logged and skipped — the next cycle
        will retry.
        """
        try:
            index_em_df = ak.stock_zh_index_spot_em('沪深重要指数')

            def field(code, col):
                # First matching row for an index code; IndexError if absent
                # (caught by the outer handler).
                return index_em_df[index_em_df['代码'] == code][col].values[0]

            sh_index = field('000001', '最新价')
            sh_index_change = field('000001', '涨跌幅')
            sh_index_money = field('000001', '成交额')
            sz_index_money = field('399001', '成交额')
            cyb_index = field('399006', '最新价')
            cyb_index_change = field('399006', '涨跌幅')

            # 成交额 is reported in yuan; convert to 亿 (1e8 yuan) before
            # display — the original showed the raw yuan sum with an 亿
            # suffix, overstating turnover by a factor of 10^8.
            total_turnover = (sz_index_money + sh_index_money) / 1e8

            # Marshal the update onto the Tk main thread.
            self.root.after(0, self.update_gui, {
                '上证指数': f"{sh_index:.2f}: {sh_index_change:.2f}%",
                '创业板指': f"{cyb_index:.2f}: {cyb_index_change:.2f}%",
                '成交量': f"{total_turnover:.0f}亿",
            })

        except Exception as e:
            print("数据获取失败:", e)

    def get_yesterday_data(self):
        """Fallback scraper for yesterday's first-board/consecutive-board
        counts; returns placeholder '--' values on any failure.
        """
        try:
            url = "https://data.eastmoney.com/stock/tradedetail.html"
            # timeout added so a stalled connection cannot hang the
            # update thread indefinitely.
            r = requests.get(url, headers={'User-Agent': 'Mozilla/5.0'}, timeout=10)
            soup = BeautifulSoup(r.text, 'html.parser')
            # TODO: parse real values from the page; placeholders for now.
            return {'zsb': '56', 'zlb': '23'}
        except Exception:
            return {'zsb': '--', 'zlb': '--'}

    def get_market_stats(self):
        """Aggregate up/down counts and turnover across both exchanges.

        Returns a dict with ``up``/``down``/``volume`` keys. On failure
        the same keys are returned with zero values — the original
        swallowed the exception and implicitly returned ``None``, which
        would crash any caller that indexed the result.
        """
        try:
            # NOTE(review): the names look swapped — ``stock_sse_deal_daily``
            # (Shanghai) is bound to sz_df and ``stock_szse_summary``
            # (Shenzhen) to sh_df. Bindings are kept as-is so each frame
            # stays paired with the columns read below; verify against the
            # akshare column schemas before renaming.
            sz_df = ak.stock_sse_deal_daily()
            sh_df = ak.stock_szse_summary()

            return {
                'up': int(sh_df['上涨数'].iloc[0]) + int(sz_df['上涨家数'].iloc[0]),
                'down': int(sh_df['下跌数'].iloc[0]) + int(sz_df['下跌家数'].iloc[0]),
                'volume': round((float(sh_df['成交金额'].iloc[0]) +
                                 float(sz_df['成交金额'].iloc[0])) / 10000, 1)
            }
        except Exception:
            # Explicit fallback instead of implicitly returning None.
            return {'up': 0, 'down': 0, 'volume': 0}

    def get_a50_data(self):
        """Fetch the CFFEX CSI300 future quote (A50 proxy); fall back on error."""
        try:
            futures_df = ak.futures_zh_spot(subscribe_list=['CFF_CN300'])
            return futures_df.iloc[0]['最新价']
        except Exception:
            # NOTE(review): fallback_a50_data is not defined in this file —
            # this raises AttributeError unless it is provided elsewhere.
            return self.fallback_a50_data()

    def update_gui(self, data):
        """Apply *data* (metric name -> display string) to the labels.

        Metrics absent from *data* are reset to '--'.
        """
        for key, label in self.labels.items():
            label.config(text=f"{key}: {data.get(key, '--')}")

    def get_ths_data(self):
        """Scrape the 10jqka market page with headless Chrome.

        Returns the parsed BeautifulSoup document; re-raises on failure
        after logging. The driver is always quit in ``finally``.
        """
        driver = None
        try:
            options = Options()
            options.add_argument("--headless")
            options.add_argument("--disable-blink-features=AutomationControlled")
            options.add_experimental_option("excludeSwitches", ["enable-automation"])
            options.add_argument(f"user-agent={UserAgent().random}")
            options.add_argument("--window-size=1920,1080")
            driver = webdriver.Chrome(options=options)
            print("正在访问同花顺...")
            driver.get("http://q.10jqka.com.cn/")

            time.sleep(random.uniform(1, 3))
            # Hide the webdriver flag to reduce bot detection.
            driver.execute_script(
                "Object.defineProperty(navigator, 'webdriver', {get: () => undefined})"
            )
            # Simulate human-like scrolling.
            driver.execute_script("window.scrollBy(0, 500)")
            time.sleep(random.uniform(0.5, 1.5))

            WebDriverWait(driver, 20).until(
                EC.presence_of_element_located((By.CLASS_NAME, "hcharts-list")))

            # Save the page source for offline debugging.
            with open("../debug_page.html", "w", encoding="utf-8") as f:
                f.write(driver.page_source)
            print("页面已保存至 debug_page.html")

            soup = BeautifulSoup(driver.page_source, 'lxml', from_encoding="utf-8")
            print(soup.select(".c-rise")[0].text)
            return soup
        except Exception as e:
            print(f"Selenium 获取数据失败: {str(e)}")
            raise
        finally:
            if driver:
                driver.quit()

if __name__ == "__main__":
    # Launch the monitor in its own top-level Tk window and enter the
    # event loop (blocks until the window is closed).
    main_window = tk.Tk()
    monitor = StockMonitor(main_window)
    main_window.mainloop()