import requests
from bs4 import BeautifulSoup
from datetime import datetime
import time

def fetch_daily_papers():
    """Fetch the latest papers from the arXiv astro-ph.CO "new" listing page.

    Returns:
        list[dict]: one dict per paper with keys 'arxiv_id', 'title',
        'authors', 'abstract', 'comments', 'subjects' and 'fetch_time'.
        Returns an empty list on any network or page-level parsing failure.
    """
    url = "https://arxiv.org/list/astro-ph.CO/new"

    try:
        # Timeout so a stalled connection cannot hang the daily scheduler.
        response = requests.get(url, timeout=30)
        response.raise_for_status()

        # Parse the listing HTML.
        soup = BeautifulSoup(response.text, 'html.parser')

        papers = []
        # Each paper is a <dt> (id/links) immediately followed by a <dd>
        # (details); zip pairs them positionally.
        dt_items = soup.find_all('dt')
        dd_items = soup.find_all('dd')

        for dt, dd in zip(dt_items, dd_items):
            try:
                # arXiv id from the abstract link, e.g. '/abs/2401.01234'.
                arxiv_id = dt.find('a', title="Abstract")['href'].replace('/abs/', '')

                # Title text with its "Title:" label stripped.
                title = dd.find('div', class_='list-title').text.replace('Title:', '').strip()

                # Author names (one per <a> link).
                authors_div = dd.find('div', class_='list-authors')
                authors = [a.text for a in authors_div.find_all('a')]

                # Abstract paragraph.
                abstract = dd.find('p', class_='mathjax').text.strip()

                # Optional comments field.
                comments_div = dd.find('div', class_='list-comments')
                comments = comments_div.text.replace('Comments:', '').strip() if comments_div else ""

                # Optional subject classification field.
                subjects_div = dd.find('div', class_='list-subjects')
                subjects = subjects_div.text.replace('Subjects:', '').strip() if subjects_div else ""
            except (AttributeError, TypeError, KeyError):
                # Skip a single malformed entry instead of aborting the whole
                # page (previously any bad entry made the function return []).
                continue

            papers.append({
                'arxiv_id': arxiv_id,
                'title': title,
                'authors': authors,
                'abstract': abstract,
                'comments': comments,
                'subjects': subjects,
                'fetch_time': datetime.now().strftime("%Y-%m-%d %H:%M:%S")
            })

        return papers

    except Exception as e:
        print(f"抓取arXiv论文时出错: {e}")
        return []

# NOTE: these imports belong in the top-of-file import block.
import json
from typing import List, Dict

def summarize_with_deepseek(papers, api_key):
    """Summarize the fetched papers via the DeepSeek chat-completions API.

    Args:
        papers: list of paper dicts as produced by fetch_daily_papers().
        api_key: DeepSeek API key, sent as a Bearer token.

    Returns:
        str | None: the model's summary text, or None on any API failure.
    """
    DEEPSEEK_API_URL = "https://api.deepseek.com/v1/chat/completions"

    # Prompt preamble (Chinese): classify the papers by research area,
    # summarize each one, then give an overall trend analysis.
    prompt = """你是一位天体物理学专家，请帮我总结以下arXiv论文的主要信息：
    
1. 按研究领域分类整理这些论文
2. 对每篇论文用中文总结：
   - 研究主题
   - 主要方法/技术
   - 关键发现/结论
3. 最后给出整体趋势分析，指出今天astro-ph.CO领域的研究热点

以下是论文信息：
"""

    # Append title/authors/abstract/subjects of every paper to the prompt.
    for paper in papers:
        prompt += f"\n\n标题: {paper['title']}\n"
        prompt += f"作者: {', '.join(paper['authors'])}\n"
        prompt += f"摘要: {paper['abstract']}\n"
        prompt += f"主题分类: {paper['subjects']}\n"

    headers = {
        "Authorization": f"Bearer {api_key}",
        "Content-Type": "application/json"
    }

    payload = {
        "model": "deepseek-chat",
        "messages": [
            {"role": "user", "content": prompt}
        ],
        "temperature": 0.7,
        "max_tokens": 6400
    }

    try:
        # json= is the idiomatic requests way to send a JSON body (replaces
        # data=json.dumps(...)); the timeout keeps a stalled API call from
        # blocking the daily scheduler.
        response = requests.post(DEEPSEEK_API_URL, headers=headers,
                                 json=payload, timeout=120)
        response.raise_for_status()
        result = response.json()
        return result['choices'][0]['message']['content']
    except Exception as e:
        print(f"DeepSeek API调用失败: {e}")
        return None

def save_papers(papers: list):
    """Save the raw paper data as pretty-printed UTF-8 JSON.

    The file is written to the current working directory and named
    '<YYYY-MM-DD>_astro-ph.CO_papers.json'.

    Args:
        papers: list of paper dicts as produced by fetch_daily_papers().
    """
    today = datetime.now().strftime("%Y-%m-%d")
    filename = f"{today}_astro-ph.CO_papers.json"

    # ensure_ascii=False keeps Chinese text readable in the JSON file.
    with open(filename, 'w', encoding='utf-8') as f:
        json.dump(papers, f, indent=2, ensure_ascii=False)

    # Bug fix: the message previously printed a literal placeholder
    # instead of the actual file name.
    print(f"原始论文数据已保存到文件: {filename}")

def save_summary(summary: str):
    """Save the generated summary to a dated plain-text file.

    The file is written to the current working directory and named
    '<YYYY-MM-DD>_astro-ph.CO_summary.txt', with a title line and a
    separator rule prepended to the summary body.
    """
    today = datetime.now().strftime("%Y-%m-%d")
    filename = f"{today}_astro-ph.CO_summary.txt"

    with open(filename, 'w', encoding='utf-8') as f:
        f.write(f"arXiv astro-ph.CO 每日论文总结 - {today}\n")
        f.write("=" * 50 + "\n\n")
        f.write(summary)

    # Bug fix: print the real file name rather than a literal placeholder.
    print(f"总结已保存到文件: {filename}")

# NOTE: these imports belong in the top-of-file import block.
import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart

def send_email(summary: str, sender_email: str, sender_password: str,
               recipient_emails: list, smtp_server: str = 'your.smtp.server',
               smtp_port: int = 465) -> bool:
    """Send the daily summary by email over SMTP-SSL.

    Args:
        summary: summary text produced by summarize_with_deepseek().
        sender_email: sender address, also used as the SMTP login name.
        sender_password: SMTP password for the sender account.
        recipient_emails: list of recipient addresses.
        smtp_server: SMTP host (previously hard-coded; replace the
            placeholder default with your own server).
        smtp_port: SMTP-SSL port, 465 by default.

    Returns:
        True if the mail was handed to the server, False on any failure.
    """
    today = datetime.now().strftime("%Y-%m-%d")
    subject = f"arXiv astro-ph.CO 每日论文总结 - {today}"

    # Build the message envelope.
    msg = MIMEMultipart()
    msg['From'] = sender_email
    msg['To'] = ", ".join(recipient_emails)
    msg['Subject'] = subject

    # Plain-text body with a disclaimer footer (Chinese, user-facing).
    body = f"""arXiv astro-ph.CO 每日论文总结 - {today}
============================================

{summary}

--
以上内容由DeepSeek AI总结每日arxiv最新论文摘要产生，可能在翻译，科学性等各方面存在问题，仅供参考！

"""
    msg.attach(MIMEText(body, 'plain'))

    try:
        # Connect over SSL, authenticate, and send.
        with smtplib.SMTP_SSL(smtp_server, smtp_port) as server:
            server.login(sender_email, sender_password)
            server.sendmail(sender_email, recipient_emails, msg.as_string())
        print(f"邮件已成功发送至 {len(recipient_emails)} 个收件人")
        return True
    except Exception as e:
        print(f"邮件发送失败: {e}")
        return False

# Scheduler: fetch, summarize, save, and email the papers once a day.
def schedule_daily_crawl(sender_email: str, sender_password: str,
                         recipient_emails: list,
                         deepseek_api_key: str = "your_own_api_key"):
    """Run forever, firing the fetch/summarize/email pipeline at 08:00 daily.

    Args:
        sender_email: sender address for the summary email.
        sender_password: SMTP password for the sender account.
        recipient_emails: list of recipient addresses.
        deepseek_api_key: DeepSeek API key (previously hard-coded inside
            the loop; replace the placeholder default with a real key).
    """
    while True:
        now = datetime.now()

        # Polling every 60s guarantees exactly one check lands inside the
        # 08:00 minute window.
        if now.hour == 8 and now.minute == 0:
            print(f"{now.strftime('%Y-%m-%d %H:%M:%S')} - 开始抓取arXiv论文...")
            papers = fetch_daily_papers()

            if papers:
                print(f"成功抓取到{len(papers)}篇论文")
                # Persist the raw paper data first.
                save_papers(papers)

                # Summarize with DeepSeek, then save and email the result.
                summary = summarize_with_deepseek(papers, deepseek_api_key)
                if summary:
                    print("\n论文总结结果:")
                    print(summary)
                    save_summary(summary)
                    send_email(summary, sender_email, sender_password, recipient_emails)
                else:
                    print("总结生成失败")

            # Sleep past the 08:00 window so the job cannot fire twice.
            time.sleep(3600)
        else:
            time.sleep(60)

# Script entry point.
if __name__ == "__main__":
    # Fill in real credentials before deploying. A one-off fetch can be
    # tested with fetch_daily_papers() instead of starting the scheduler.
    SENDER = "your_email_address"
    PASSWORD = "your_smtp_server_password"
    RECIPIENTS = ["your_customer_emails"]

    schedule_daily_crawl(
        sender_email=SENDER,
        sender_password=PASSWORD,
        recipient_emails=RECIPIENTS,
    )