from langchain.agents import AgentType, initialize_agent, Tool
from langchain_siliconflow import ChatSiliconFlow
from langchain.memory import ConversationBufferMemory
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from datetime import datetime
from dotenv import load_dotenv
import requests
from bs4 import BeautifulSoup
import os

# Load environment variables from a local .env file.
load_dotenv()

# SiliconFlow credentials and endpoint, read from the environment.
# NOTE(review): only the API key is validated below — "model" and "base_url"
# may come back None if unset; confirm the .env always defines all three.
siliconflow_api_key = os.getenv("SILICONFLOW_API_KEY")
model = os.getenv("model")
base_url = os.getenv("base_url")
if not siliconflow_api_key or siliconflow_api_key == "your-siliconflow-api-key-here":
    raise ValueError("请先在.env文件中设置正确的硅基流动API密钥")

# Module-level LLM used by the agent (temperature 0 for deterministic tool use).
llm = ChatSiliconFlow(temperature=0,
                      siliconflow_api_key=siliconflow_api_key,
                      model=model,
                      base_url=base_url)
def extract_article_content(url):
    """Fetch a web page and extract the article it contains.

    Downloads *url*, parses the HTML, and pulls out the page title, the
    article body (plain text with markdown image links kept inline, in
    document order) and a best-effort publish time.

    Args:
        url: Absolute URL of the article page.

    Returns:
        On success: dict with keys 'title', 'content', 'publish_time',
        'url' and 'status' == 'success'.
        On any failure: {'status': 'error', 'error': <message>}.
    """
    # Stdlib helpers used below; imported at function top so the function is
    # self-contained (previously imported mid-loop / mid-function).
    import re
    from urllib.parse import urljoin

    heading_tags = ['h1', 'h2', 'h3', 'h4', 'h5', 'h6']

    try:
        # Fetch with a browser-like User-Agent; some sites block default clients.
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
        }
        response = requests.get(url, headers=headers, timeout=10)
        response.raise_for_status()

        soup = BeautifulSoup(response.content, 'html.parser')

        # Title from the <title> tag, with a fallback marker.
        title = soup.find('title')
        title_text = title.get_text().strip() if title else "未找到标题"

        # Body: try common article containers, most specific first.
        article_selectors = [
            'article',
            '.article-content',
            '.post-content',
            '.entry-content',
            '.content',
            'main',
            '[role="main"]',
            'div#content'
        ]

        article_element = None
        for selector in article_selectors:
            element = soup.select_one(selector)
            if element:
                article_element = element
                break

        # Fall back to <body>, then to the whole document (guards pages
        # without a <body> tag, which previously crashed the walk).
        if not article_element:
            article_element = soup.find('body') or soup

        # Walk the subtree once, collecting text, images, headings and line
        # breaks in document order.
        parts = []
        for element in article_element.descendants:
            if isinstance(element, str):
                parent_name = element.parent.name if element.parent else None
                # Skip script/style payloads: they are code, not article text.
                if parent_name in ('script', 'style'):
                    continue
                # Skip text inside headings: the heading branch below emits it
                # once; previously heading text was emitted twice.
                if element.find_parent(heading_tags) is not None:
                    continue
                text = element.strip()
                if text:
                    parts.append(text + '\n')
            elif element.name == 'img':
                # Keep the image's position in the text as a markdown link.
                src = element.get('src')
                if src and not src.startswith('data:'):  # exclude base64 images
                    # Resolve relative image URLs against the page URL.
                    if not src.startswith(('http://', 'https://')):
                        src = urljoin(url, src)
                    parts.append(f'![图片]({src})\n\n')
                else:
                    # No usable src: still mark where the image was.
                    parts.append('[图片]\n\n')
            elif element.name in heading_tags:
                # Emit the full heading text (including nested tags) as a
                # markdown heading of the matching level.
                level = int(element.name[1])
                parts.append('\n' + '#' * level + ' ' + element.get_text(strip=True) + '\n\n')
            elif element.name == 'br':
                parts.append('\n')

        # Normalize: strip each line and drop empty ones.
        article_content = ''.join(parts)
        article_content = '\n'.join(line.strip() for line in article_content.split('\n') if line.strip())

        # Publish time: try common meta tags / elements first.
        publish_time = None
        time_selectors = [
            'meta[property="article:published_time"]',
            'meta[name="publish_date"]',
            'meta[name="date"]',
            'time[datetime]',
            '.publish-date',
            '.post-date'
        ]

        for selector in time_selectors:
            element = soup.select_one(selector)
            if element:
                if element.get('content'):
                    publish_time = element.get('content')
                elif element.get('datetime'):
                    publish_time = element.get('datetime')
                elif element.get_text():
                    publish_time = element.get_text().strip()
                if publish_time:
                    break

        # Otherwise scan the page text for common Chinese/ISO date formats.
        if not publish_time:
            # NOTE: the separator classes were previously written as
            # [年\\-\\/] inside raw strings, which matches a literal
            # backslash rather than '-'; [年/-] (dash last) is the intended
            # character set.
            time_patterns = [
                r'\d{4}年\d{1,2}月\d{1,2}日',
                r'\d{4}-\d{2}-\d{2}',
                r'\d{4}/\d{2}/\d{2}',
                r'发布于\s*\d{4}[年/-]\d{1,2}[月/-]\d{1,2}日?',
                r'发布时间\s*[:：]\s*\d{4}[年/-]\d{1,2}[月/-]\d{1,2}日?'
            ]

            # Guard against documents with no <body> tag.
            body = soup.find('body')
            body_text = body.get_text() if body else soup.get_text()
            for pattern in time_patterns:
                matches = re.findall(pattern, body_text)
                if matches:
                    publish_time = matches[0]  # first match wins
                    break

        return {
            'title': title_text,
            'content': article_content,  # full content, images inline
            'publish_time': publish_time or "未找到发布时间",
            'url': url,
            'status': 'success'
        }

    except Exception as e:
        # Best-effort scraper: report the failure instead of raising so the
        # caller can branch on 'status'.
        return {
            'status': 'error',
            'error': str(e)
        }

# Prompt template for the summarization chain; the placeholders are filled
# from the dict produced by extract_article_content. The instruction text is
# user-facing prompt content and is intentionally kept in Chinese.
summary_template = """
请根据以下文章内容生成一个简洁的摘要：

标题: {title}

发布时间: {publish_time}

原文链接: {url}

文章内容: {content}

请用中文生成一个简洁的摘要，突出文章的主要内容和关键信息。
"""

def generate_article_summary(article_data, *, max_content_chars=1000):
    """Generate a Chinese summary for an extracted article via the LLM.

    Args:
        article_data: dict with 'title', 'url', 'publish_time' and 'content'
            keys (as produced by extract_article_content on success).
        max_content_chars: upper bound on how much article text is sent to
            the model, to keep the prompt within context limits. The default
            of 1000 preserves the original behavior.

    Returns:
        The summary text produced by the LLM chain.
    """
    prompt = PromptTemplate(
        template=summary_template,
        input_variables=["title", "url", "publish_time", "content"]
    )

    # Slightly creative temperature for summarization. A dedicated client is
    # built per call (renamed from `llm` so it no longer shadows the
    # module-level agent LLM).
    summary_llm = ChatSiliconFlow(temperature=0.7,
                                  siliconflow_api_key=siliconflow_api_key,
                                  model=model,
                                  base_url=base_url)
    chain = LLMChain(llm=summary_llm, prompt=prompt)

    # NOTE(review): LLMChain / chain.run are legacy LangChain APIs; kept for
    # compatibility with the pinned dependency versions — confirm before
    # upgrading LangChain.
    return chain.run({
        "title": article_data['title'],
        "url": article_data['url'],
        "publish_time": article_data['publish_time'],
        "content": article_data['content'][:max_content_chars]  # cap prompt size
    })

# Tools exposed to the agent: one fetches and parses an article from a URL,
# one turns the extracted article dict into a summary. The `description`
# strings are read by the LLM at runtime and stay in Chinese.
tools = [
    Tool(
        name="Article Extractor",
        func=extract_article_content,
        description="从URL提取文章内容，包括标题、正文、发布时间等信息"
    ),
    Tool(
        name="Summary Generator",
        func=generate_article_summary,
        description="根据提取的文章内容生成详细摘要，输入格式为文章数据字典"
    )
]



# 创建记忆
memory = ConversationBufferMemory(memory_key="chat_history")

# 初始化agent
agent = initialize_agent(
    tools=tools,
    llm=llm,
    agent=AgentType.CONVERSATIONAL_REACT_DESCRIPTION,
    memory=memory,
    verbose=True
)

def extract_and_summarize_article(url):
    """Run the full pipeline: fetch and parse an article, then summarize it.

    Returns an error string when extraction fails; otherwise a dict with the
    title, publish time, generated summary, full content (images inline) and
    the original URL.
    """
    print(f"正在提取文章内容: {url}")

    # Step 1: pull the article down and parse it.
    article = extract_article_content(url)

    # Bail out early with a human-readable message on extraction failure.
    if article['status'] == 'error':
        return f"提取文章失败: {article['error']}"

    print(f"文章提取完成，标题: {article['title']}")
    print("开始生成摘要...")

    # Step 2: summarize, passing only the fields the template needs.
    summary = generate_article_summary(
        {key: article[key] for key in ('title', 'url', 'publish_time', 'content')}
    )

    # Assemble and return the combined result.
    return {
        'title': article['title'],
        'publish_time': article['publish_time'],
        'summary': summary,
        'content_preview': article['content'],  # full content (images inline)
        'url': url
    }

# Demo entry point: extract one article, print the results, persist reports.
if __name__ == "__main__":
    # Sample article URL — swap in any page you want to process.
    test_url = "https://www.tsinghua.edu.cn/info/3215/120653.htm"

    try:
        outcome = extract_and_summarize_article(test_url)

        # Console report.
        separator = "=" * 60
        print("\n" + separator)
        print("文章提取和摘要结果:")
        print(separator)
        print(f"标题: {outcome['title']}")
        print(f"发布时间: {outcome['publish_time']}")
        print(f"\n摘要:\n{outcome['summary']}")
        print(f"\n内容预览:\n{outcome['content_preview']}")
        print(f"\n原文链接: {outcome['url']}")

        # Timestamped file names so repeated runs never clobber each other.
        stamp = datetime.now().strftime("%Y%m%d_%H%M%S")

        # Plain-text report: compose once, write once.
        txt_filename = f"article_summary_{stamp}.txt"
        txt_body = (
            f"标题: {outcome['title']}\n"
            f"发布时间: {outcome['publish_time']}\n"
            f"原文链接: {outcome['url']}\n\n"
            f"摘要:\n{outcome['summary']}\n\n"
            f"内容预览:\n{outcome['content_preview']}"
        )
        with open(txt_filename, 'w', encoding='utf-8') as f:
            f.write(txt_body)

        # Markdown report: images are already inline in the body text.
        md_filename = f"article_summary_{stamp}.md"
        md_body = (
            f"# {outcome['title']}\n\n"
            f"**发布时间**: {outcome['publish_time']}\n\n"
            f"**原文链接**: [{outcome['url']}]({outcome['url']})\n\n"
            f"## 摘要\n\n{outcome['summary']}\n\n"
            f"## 正文内容\n\n{outcome['content_preview']}\n\n"
        )
        with open(md_filename, 'w', encoding='utf-8') as f:
            f.write(md_body)

        print(f"\n结果已保存到: {txt_filename} (文本格式)")
        print(f"结果已保存到: {md_filename} (Markdown格式)")

    except Exception as e:
        print(f"发生错误: {e}")
        print("请检查硅基流动API密钥是否正确设置，并确保URL有效")