import pandas as pd
import requests
import json
import time
import random
from tqdm import tqdm
import os

# Configuration constants
# Endpoint of the IEEE Xplore search REST API (used to resolve a DOI to an
# article number).
API_URL = "https://ieeexplore.ieee.org/rest/search"
# Browser-like request headers sent with every API call.
# NOTE(review): presumably the API expects a realistic User-Agent/Referer —
# confirm against current IEEE Xplore behavior.
HEADERS = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36",
    "Accept": "application/json",
    "Content-Type": "application/json",
    "Origin": "https://ieeexplore.ieee.org",
    "Referer": "https://ieeexplore.ieee.org/search/advanced"
}
# JSON file that persists resume state: current file, last row index,
# processed DOIs, and the list of fully completed files.
PROGRESS_FILE = "abstract_progress.json"
# Directory where the *_updated.csv output files are written.
OUTPUT_DIR = "perfect_abstract"

def get_full_abstract(doi, session, max_retries=3):
    """Fetch the full abstract of a paper identified by its DOI.

    First queries the IEEE Xplore search API to resolve the DOI to an
    article number, then requests the abstract from the document detail
    endpoint.

    Args:
        doi: DOI string identifying the paper.
        session: requests.Session used for all HTTP calls.
        max_retries: number of attempts for transient failures.

    Returns:
        The abstract text with newlines collapsed to spaces, or None when
        the paper or its abstract cannot be found.

    Raises:
        requests.exceptions.ConnectionError: on DNS failure, or after the
            retries are exhausted.
        Exception: any other error is re-raised to the caller so progress
            can be saved before exiting.
    """
    for attempt in range(max_retries):
        try:
            params = {
                "newsearch": True,
                "queryText": f'("DOI":"{doi}")',
                "highlight": True,
                "returnFacets": ["ALL"],
                "returnType": "SEARCH",
                "matchPubs": True,
                "rowsPerPage": 1,
                "pageNumber": 1
            }

            response = session.post(API_URL, json=params, headers=HEADERS, timeout=30)

            if response.status_code != 200:
                # Non-200 (e.g. 429 rate limiting or a 5xx) is likely
                # transient: back off and retry instead of giving up on the
                # first attempt (previously this returned None immediately).
                if attempt < max_retries - 1:
                    time.sleep(10 * (attempt + 1))
                    continue
                return None

            data = response.json()
            records = data.get("records", [])
            if not records:
                return None  # DOI not found — retrying will not help

            article_number = records[0].get("articleNumber")
            if not article_number:
                return None

            detail_url = f"https://ieeexplore.ieee.org/document/{article_number}/abstract"
            detail_api_url = f"https://ieeexplore.ieee.org/rest/document/{article_number}/abstract"

            # Mimic navigating from the document page itself.
            detail_headers = HEADERS.copy()
            detail_headers.update({"Referer": detail_url})

            detail_response = session.get(detail_api_url, headers=detail_headers, timeout=30)

            if detail_response.status_code == 200:
                detail_data = detail_response.json()
                if "abstract" in detail_data:
                    return detail_data["abstract"].replace("\n", " ")
            return None

        except requests.exceptions.ConnectionError as e:
            if "NameResolutionError" in str(e):
                # DNS failure usually means the whole network is down: wait,
                # then abort so the caller can persist progress and exit.
                print("\n域名解析错误，可能是网络问题，等待60秒后退出...")
                time.sleep(60)
                raise
            elif attempt < max_retries - 1:
                wait_time = 30 * (attempt + 1)  # linear backoff
                print(f"\n连接错误，等待{wait_time}秒后重试...")
                time.sleep(wait_time)
            else:
                print(f"获取摘要时出错 (DOI: {doi}): {str(e)}")
                raise  # retries exhausted — propagate to the caller

        except Exception as e:
            print(f"获取摘要时出错 (DOI: {doi}): {str(e)}")
            raise  # unexpected errors propagate so progress gets saved

    return None

def load_progress():
    """Load the saved processing state, falling back to a fresh state.

    Returns a dict with keys ``current_file``, ``last_index``,
    ``processed_dois`` and ``completed_files``. A progress file written by
    an older version (missing ``completed_files``) is upgraded in place.
    """
    if os.path.exists(PROGRESS_FILE):
        with open(PROGRESS_FILE, 'r', encoding='utf-8') as fh:
            state = json.load(fh)
        if isinstance(state, dict):
            state.setdefault('completed_files', [])
            return state
    # No usable progress file: start from scratch.
    return {
        "current_file": "",
        "last_index": 0,
        "processed_dois": [],
        "completed_files": [],
    }

def save_progress(filename, index, processed_dois, completed_files):
    """Persist the current processing state to PROGRESS_FILE as JSON.

    Args:
        filename: name of the CSV file currently being processed.
        index: last row index handled in that file.
        processed_dois: DOIs already fetched for the current file.
        completed_files: files that have been fully processed.
    """
    state = {
        "current_file": filename,
        "last_index": index,
        "processed_dois": processed_dois,
        "completed_files": completed_files,
    }
    with open(PROGRESS_FILE, 'w', encoding='utf-8') as fh:
        json.dump(state, fh, ensure_ascii=False, indent=2)

def update_abstracts(input_file, output_file, filename, test_rows=None):
    """Update the abstracts in one CSV file, with resume and checkpointing.

    Args:
        input_file: path of the source CSV (must contain 'doi' and
            'abstract' columns).
        output_file: path where the updated CSV is written.
        filename: logical file name, used as the key in the progress file.
        test_rows: unused; kept for backward compatibility with callers.

    Returns:
        True when every row has been processed.

    Raises:
        KeyboardInterrupt / Exception: re-raised after progress has been
        saved, so the outer loop can decide whether to continue.
    """
    print("正在读取文件...")

    # Restore progress only when it belongs to this file.
    progress = load_progress()
    resuming = progress["current_file"] == filename
    start_index = progress["last_index"] if resuming else 0
    processed_dois = set(progress["processed_dois"]) if resuming else set()
    completed_files = set(progress["completed_files"])
    # Pre-bind so the except handler can reference it even when the failure
    # happens before the row loop starts (previously an UnboundLocalError
    # would mask the real error).
    actual_row = start_index

    try:
        df = pd.read_csv(input_file)
        df['abstract'] = df['abstract'].fillna('')

        # Merge abstracts fetched during a previous (partial) run.
        # NOTE(review): this assumes output_file has the same row order as
        # input_file — true here because it is always written from this df.
        if os.path.exists(output_file):
            existing_df = pd.read_csv(output_file)
            for idx, row in existing_df.iterrows():
                if not pd.isna(row['abstract']):
                    df.loc[idx, 'abstract'] = row['abstract']

        print(f"共读取 {len(df)} 行数据")

        session = requests.Session()
        processed_count = 0

        for actual_row, (index, row) in enumerate(df.iterrows(), 0):
            # Skip rows already handled in a previous run.
            if actual_row < start_index:
                continue

            doi = row['doi']
            print(f"\n处理第 {actual_row + 1} 行:")
            print(f"DOI: {doi}")

            # Guard against missing DOIs before the membership test so a
            # NaN value never reaches the processed set.
            if pd.isna(doi) or not doi:
                print("没有DOI，跳过")
                continue

            if doi in processed_dois:
                print("已处理过，跳过")
                continue

            print(f"原摘要: {str(row['abstract'])[:100]}...")

            full_abstract = get_full_abstract(doi, session)
            if full_abstract:
                print(f"新摘要: {full_abstract[:100]}...")
                df.loc[index, 'abstract'] = full_abstract
                processed_count += 1
                # Checkpoint the CSV after every 10 successful updates.
                # (Previously the modulo test also fired on every failed
                # fetch while processed_count was still 0, rewriting the
                # file once per row.)
                if processed_count % 10 == 0:
                    df.to_csv(output_file, index=False, encoding='utf-8')
                    print("已保存当前进度到文件")
            else:
                print("获取失败，保持原摘要")

            processed_dois.add(doi)
            save_progress(filename, actual_row, list(processed_dois), list(completed_files))

            # Polite randomized delay (2–5 s) to avoid hammering the API.
            time.sleep(2 + random.random() * 3)

        # Final save of the complete file.
        df.to_csv(output_file, index=False, encoding='utf-8')
        print(f"\n完成! 已更新 {processed_count} 条摘要，数据保存到 {output_file}")
        return True

    except (KeyboardInterrupt, Exception) as e:
        print(f"\n{'检测到中断' if isinstance(e, KeyboardInterrupt) else '发生错误'}: {str(e)}")
        print("正在保存进度...")
        if 'df' in locals():
            out_dir = os.path.dirname(output_file)
            # dirname is '' for a bare filename, and makedirs('') raises.
            if out_dir:
                os.makedirs(out_dir, exist_ok=True)
            df.to_csv(output_file, index=False, encoding='utf-8')
            print(f"已保存当前进度到文件: {output_file}")
        save_progress(filename, actual_row, list(processed_dois), list(completed_files))
        print(f"进度已保存，下次运行将从第 {actual_row + 1} 行继续")
        raise  # re-raise so the outer loop sees the interruption/error

def process_all_files(input_dir="split_papers"):
    """Process every CSV under input_dir, resuming from saved progress.

    Skips files recorded as completed, resumes the file that was in flight
    when the previous run stopped, and writes each result to OUTPUT_DIR as
    ``<name>_updated.csv``. Stops on the first error or user interrupt.
    """
    try:
        if not os.path.exists(input_dir):
            print(f"错误: 目录 {input_dir} 不存在")
            return

        if not os.path.exists(OUTPUT_DIR):
            os.makedirs(OUTPUT_DIR)
            print(f"创建输出目录: {OUTPUT_DIR}")

        # Collect source CSVs, ignoring previously generated *_updated files.
        candidates = [
            name for name in os.listdir(input_dir)
            if name.endswith('.csv') and not name.endswith('_updated.csv')
        ]
        if not candidates:
            print(f"在 {input_dir} 目录下没有找到CSV文件")
            return

        progress = load_progress()
        current_file = progress["current_file"]
        completed_files = set(progress["completed_files"])

        # Drop files that earlier runs finished.
        pending = [name for name in candidates if name not in completed_files]
        if not pending:
            print("所有文件都已处理完成")
            return

        # Resume from the file that was in flight when we last stopped.
        if current_file and current_file in pending:
            pending = pending[pending.index(current_file):]
            print(f"从上次的文件 {current_file} 继续处理")

        print(f"找到 {len(pending)} 个文件需要处理")
        print(f"已完成 {len(completed_files)} 个文件")

        for position, name in enumerate(pending, 1):
            source_path = os.path.join(input_dir, name)
            target_name = name.rsplit('.', 1)[0] + '_updated.csv'
            target_path = os.path.join(OUTPUT_DIR, target_name)

            print(f"\n处理第 {position}/{len(pending)} 个文件: {name}")

            try:
                if update_abstracts(source_path, target_path, name):
                    # Mark done and reset the per-file progress fields.
                    completed_files.add(name)
                    save_progress("", 0, [], list(completed_files))
                    print(f"文件 {name} 处理完成")
                else:
                    print(f"文件 {name} 处理未完成，将在下次运行时继续")
                    return
            except KeyboardInterrupt:
                print("\n检测到用户中断，保存进度后退出...")
                return
            except Exception as e:
                print(f"处理文件 {name} 时出错: {str(e)}")
                return

    except KeyboardInterrupt:
        print("\n检测到用户中断，正在退出...")
    except Exception as e:
        print(f"\n发生未知错误: {str(e)}")

# Script entry point: walk the default "split_papers" directory and update
# the abstract of every paper CSV found there.
if __name__ == "__main__":
    process_all_files()