import pandas as pd
from langchain_ollama import OllamaLLM
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
from selenium import webdriver
from selenium.common import TimeoutException
from selenium.webdriver.edge.options import Options as EdgeOptions
from selenium.webdriver.edge.service import Service as EdgeService
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from webdriver_manager.microsoft import EdgeChromiumDriverManager
from bs4 import BeautifulSoup
import time
import random
import os

# Initialize the local Ollama-served model (DeepSeek-R1 14B at localhost:11434).
llm = OllamaLLM(model="deepseek-r1:14b",base_url="http://localhost:11434")


# CNKI search via a Selenium-driven Edge browser.
def search_cnki(keyword, max_papers=10, headless=True):
    """
    Search CNKI (www.cnki.net) for papers and scrape the result list.

    Drives an (optionally headless) Edge browser: opens the CNKI home page,
    types the keyword character by character, submits the search, then walks
    the paginated result table until ``max_papers`` entries are collected or
    paging stops.

    Args:
        keyword: Search keyword typed into the CNKI search box.
        max_papers: Maximum number of papers to collect across result pages.
        headless: Run Edge in the new headless mode when True.

    Returns:
        List of dicts with keys 'title', 'authors', 'abstract', 'url',
        'source', 'search_keyword'. Empty list if the browser fails to start.
    """
    # 1. Configure Edge browser options.
    edge_options = EdgeOptions()

    # Common stability/stealth flags.
    edge_options.add_argument('--disable-gpu')
    edge_options.add_argument('--no-sandbox')
    edge_options.add_argument('--disable-dev-shm-usage')
    edge_options.add_argument('--window-size=1920,1080')
    edge_options.add_argument('--log-level=3')  # reduce driver log noise
    edge_options.add_argument('--disable-blink-features=AutomationControlled')

    # Spoof a regular desktop user agent.
    edge_options.add_argument(
        'user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36 Edg/91.0.864.59'
    )

    if headless:
        edge_options.add_argument('--headless=new')  # new-style headless mode

    # 2./3. Configure the driver service and start the browser.
    try:
        # webdriver_manager downloads/locates a matching msedgedriver.
        edge_driver_path = EdgeChromiumDriverManager().install()
        service = EdgeService(edge_driver_path)
        driver = webdriver.Edge(service=service, options=edge_options)
    except Exception as e:
        print(f"Edge浏览器初始化失败: {e}")
        return []

    # 4. Implicit wait plus page-load timeout.
    driver.implicitly_wait(15)
    driver.set_page_load_timeout(30)

    papers = []

    try:
        # 5. Open the CNKI home page.
        print("正在访问知网首页...")
        driver.get('https://www.cnki.net/')
        time.sleep(random.uniform(2, 4))

        # 6. Best-effort dismissal of a possible popup overlay.
        try:
            print("尝试关闭弹窗...")
            close_btn = WebDriverWait(driver, 15).until(
                EC.presence_of_element_located((By.CSS_SELECTOR, '.close-btn, .layui-layer-close'))
            )
            driver.execute_script("arguments[0].click();", close_btn)
            time.sleep(1)
        except Exception:  # narrowed from bare except: keep Ctrl-C working
            print("未发现弹窗或关闭失败")

        # 7. Type the search keyword.
        print(f"正在搜索关键词: {keyword}")
        try:
            search_input = WebDriverWait(driver, 20).until(
                EC.presence_of_element_located((By.CSS_SELECTOR, '#txt_SearchText'))
            )

            # Clear the box, then type one character at a time with random
            # delays to mimic human input.
            driver.execute_script("arguments[0].value = '';", search_input)
            for char in keyword:
                search_input.send_keys(char)
                time.sleep(random.uniform(0.1, 0.3))

            time.sleep(random.uniform(1, 2))
        except Exception as e:
            print(f"搜索框操作失败: {e}")
            raise

        # 8. Submit the search.
        print("正在执行搜索...")
        try:
            search_btn = WebDriverWait(driver, 20).until(
                EC.element_to_be_clickable((By.CSS_SELECTOR, '.search-btn'))
            )

            # Fall back to a JS click if the native click is intercepted.
            try:
                search_btn.click()
            except Exception:  # narrowed from bare except
                driver.execute_script("arguments[0].click();", search_btn)

            time.sleep(random.uniform(3, 5))
        except Exception as e:
            print(f"搜索按钮点击失败: {e}")
            raise

        # 9. Wait for the result table rows to become visible.
        print("等待搜索结果...")
        wait = WebDriverWait(driver, 20)
        try:
            wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "table.result-table-list tbody tr")))
        except TimeoutException:
            # Fallback: rows may exist in the DOM while styled invisible.
            result_items = driver.find_elements(By.CSS_SELECTOR, "table.result-table-list tbody tr")
            if len(result_items) == 0:
                print("结果加载超时: 页面无论文条目")
                raise TimeoutException("结果加载超时")
            else:
                print("警告：结果已加载但元素暂时不可见，继续解析（可能CSS遮挡）")
        time.sleep(2)  # extra wait for dynamically injected content

        # 10. Parse the results page by page.
        print("开始解析结果...")
        page_num = 1
        # CSS selector matching each paper row in the result table.
        result_selector = "table.result-table-list tbody tr"

        while len(papers) < max_papers:
            # Parse the current page source with BeautifulSoup.
            html = driver.page_source
            soup = BeautifulSoup(html, 'html.parser')

            items = soup.select(result_selector)
            if not items:
                print(f"第{page_num}页未找到结果")
                break

            print(f"第{page_num}页找到{len(items)}条结果")

            for item in items:
                if len(papers) >= max_papers:
                    break

                try:
                    # Title: <a class="fz14"> inside <td class="name">.
                    title_elem = item.select_one('td.name a.fz14')
                    title = title_elem.get_text(strip=True) if title_elem else '无标题'

                    # Authors: all <a> inside <td class="author">.
                    authors_elem = item.select('td.author a')
                    authors = [a.get_text(strip=True) for a in authors_elem] if authors_elem else []

                    # Link: the title anchor's href, made absolute if needed.
                    url = ''
                    if title_elem and 'href' in title_elem.attrs:
                        url = title_elem['href']
                        if not url.startswith('http'):
                            url = 'https:' + url if url.startswith('//') else f'https://www.cnki.net{url}'

                    # Abstract: fetched from the detail page in a new tab.
                    abstract = get_abstract(driver, url) if url else ''
                    time.sleep(random.uniform(1, 3))

                    papers.append({
                        'title': title,
                        'authors': authors,
                        'abstract': abstract,
                        'url': url,
                        'source': '知网',
                        'search_keyword': keyword
                    })
                    print(f"已获取 {len(papers)}/{max_papers}: {title[:30]}...")

                except Exception as e:
                    print(f"解析论文条目时出错: {e}")
                    continue

            # Pagination: click "next" and wait for fresh rows.
            if len(papers) < max_papers:
                try:
                    next_page = WebDriverWait(driver, 10).until(
                        EC.element_to_be_clickable((By.CSS_SELECTOR, '.next-page:not(.disabled)'))
                    )
                    driver.execute_script("arguments[0].click();", next_page)
                    page_num += 1
                    time.sleep(random.uniform(4, 7))  # allow the page to change

                    WebDriverWait(driver, 15).until(
                        EC.visibility_of_element_located((By.CSS_SELECTOR, result_selector))
                    )
                except TimeoutException:
                    # Fallback: rows present but not yet visible.
                    result_items = driver.find_elements(By.CSS_SELECTOR, result_selector)
                    if not result_items:
                        print("翻页后结果加载超时: 页面未找到论文条目")
                        break
                    else:
                        print("翻页后警告：结果已加载但元素暂时不可见")
                except Exception as e:
                    print(f"翻页时出错: {e}")
                    break

    except Exception as e:
        print(f"搜索过程中出错: {e}")
        # Save debug artifacts. BUGFIX: the screenshot call was previously
        # glued onto the end of a comment and never executed.
        driver.save_screenshot('error.png')
        with open('page_source.html', 'w', encoding='utf-8') as f:
            f.write(driver.page_source)
    finally:
        driver.quit()
        print("浏览器已关闭")

    return papers


def get_abstract(driver, url):
    """
    Open *url* in a new browser tab, extract the paper abstract, then close
    the tab and return focus to the original window.

    Args:
        driver: Active Selenium WebDriver (positioned on the result page).
        url: Absolute URL of the paper detail page.

    Returns:
        The abstract text, or '' if it cannot be retrieved.
    """
    try:
        # Remember the window we must return to.
        main_window = driver.current_window_handle

        # Open the detail page in a new tab. BUGFIX: the URL is passed as a
        # script argument instead of being interpolated into the JS source,
        # so quotes/backslashes in the href cannot break (or inject into)
        # the script.
        driver.execute_script("window.open(arguments[0]);", url)

        # Switch to the newly opened window.
        new_window = [w for w in driver.window_handles if w != main_window][0]
        driver.switch_to.window(new_window)

        # Wait for the abstract element to appear.
        try:
            abstract_elem = WebDriverWait(driver, 15).until(
                EC.presence_of_element_located((By.CSS_SELECTOR, '.abstract-text, .abstract'))
            )
            abstract = abstract_elem.text.strip()
        except TimeoutException:  # narrowed from bare except
            abstract = ''

        # Close the detail tab and restore the main window.
        driver.close()
        driver.switch_to.window(main_window)

        return abstract

    except Exception as e:
        print(f"获取摘要时出错: {e}")
        # Best effort: close a stray detail tab if we are still on it, then
        # make sure the caller's driver is focused on the main window again.
        if 'main_window' in locals():
            try:
                if driver.current_window_handle != main_window:
                    driver.close()
            except Exception:
                pass  # the window may already be gone
            driver.switch_to.window(main_window)
        return ''


# Abstract-analysis chain (emits the Markdown result directly).
# NOTE: the template text is part of the runtime prompt and is kept verbatim.
abstract_analysis_prompt = ChatPromptTemplate.from_template(
    """分析论文摘要并提取：
    1. 研究主题
    2. 研究方法
    3. 主要结论
    4. 创新点

    摘要内容：
    {abstract}

    用Markdown格式返回结果，不要解释。"""
)
# Prompt -> local LLM -> plain string output.
analysis_chain = abstract_analysis_prompt | llm | StrOutputParser()

# Author-profiling chain (emits a Markdown table directly).
# Consumes the title, the comma-joined author list, and the output of
# analysis_chain; the template text is part of the runtime prompt.
author_analysis_prompt = ChatPromptTemplate.from_template(
    """根据论文信息分析作者：
    论文标题：{title}
    作者列表：{authors}
    摘要分析：{analysis}

    为每位作者生成：
    1. 研究专长
    2. 可能贡献
    3. 合作关系

    用Markdown表格返回，不要解释。"""
)
# Prompt -> local LLM -> plain string output.
author_chain = author_analysis_prompt | llm | StrOutputParser()


# Main pipeline: scrape CNKI, then run both LLM analysis chains per paper.
def process_cnki_search(keyword):
    """
    Search CNKI for *keyword* and enrich each scraped paper with an LLM
    abstract analysis and an author analysis.

    Args:
        keyword: Search keyword forwarded to search_cnki.

    Returns:
        List of dicts, one per paper: title, authors, abstract, the
        Markdown abstract analysis, the author-analysis table, and url.
    """
    processed = []

    for paper in search_cnki(keyword):
        # Step 1: summarize the abstract into a Markdown report.
        abstract_report = analysis_chain.invoke({"abstract": paper["abstract"]})

        # Step 2: profile the authors using the abstract report as context.
        author_report = author_chain.invoke({
            "title": paper["title"],
            "authors": ", ".join(paper["authors"]),
            "analysis": abstract_report,
        })

        # Step 3: collect the enriched record.
        processed.append({
            "title": paper["title"],
            "authors": paper["authors"],
            "abstract": paper["abstract"],
            "analysis": abstract_report,
            "author_analysis": author_report,
            "url": paper["url"],
        })

    return processed


# Example usage: run a demo search and export the results to Excel.
if __name__ == "__main__":
    keyword = "人工智能"
    search_results = process_cnki_search(keyword)

    # Persist the enriched records as a spreadsheet.
    pd.DataFrame(search_results).to_excel("cnki_search_results.xlsx", index=False)
    print("搜索结果已保存到 cnki_search_results.xlsx")