import requests
import json
import time
import csv
import random
from filelock import FileLock
import os
import traceback

# Search request payload posted to the IEEE Xplore REST endpoint.
SEARCH_PARAMS = {
    "newsearch": True,
    "queryText": "",  # filled in dynamically per volume/issue in get_papers_by_issue()
    "highlight": True,
    "returnFacets": ["ALL"],
    "returnType": "SEARCH",
    "matchPubs": True,
    "matchBoolean": True,  # enable boolean-operator search syntax in queryText
    "pageNumber": 1,
    "rowsPerPage": 100,
    "sortType": "newest"
}

# Browser-like HTTP request headers sent with every search request.
HEADERS = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36",
    "Accept": "application/json, text/plain, */*",
    "Content-Type": "application/json",
    "Origin": "https://ieeexplore.ieee.org",
    "Referer": "https://ieeexplore.ieee.org/search/advanced",
    "Accept-Language": "en-US,en;q=0.9"
}

# API endpoint and local file paths.
API_URL = "https://ieeexplore.ieee.org/rest/search"
PROGRESS_FILE = "progress.txt"   # holds the last volume number whose issues were crawled
OUTPUT_FILE = "ieee_papers.csv"  # scraped paper metadata, appended page by page

def save_to_csv(data, append=True):
    """Write a list of row dicts to OUTPUT_FILE under an inter-process file lock.

    Args:
        data: list of dicts sharing the same keys; the first row's keys
            define the CSV column order. Empty/None input is a no-op.
        append: when True, append rows and write a header only if the file
            does not exist yet; when False, overwrite and always write a header.
    """
    if not data:
        return

    lock = FileLock(f"{OUTPUT_FILE}.lock")
    with lock:
        # Check for the file *inside* the lock: checking before acquiring it
        # races with other processes holding the same lock, which could
        # duplicate the header or drop it entirely.
        file_exists = os.path.exists(OUTPUT_FILE)
        mode = 'a' if append else 'w'
        with open(OUTPUT_FILE, mode, newline='', encoding='utf-8') as f:
            writer = csv.DictWriter(f, fieldnames=data[0].keys())
            if not file_exists or not append:
                writer.writeheader()
            writer.writerows(data)

def load_progress():
    """Return the last saved volume number, or 1 if no valid progress exists.

    A missing, empty, or corrupted progress file falls back to 1 instead of
    aborting the whole crawl with an unhandled ValueError from int().
    """
    if os.path.exists(PROGRESS_FILE):
        try:
            with open(PROGRESS_FILE, 'r') as f:
                return int(f.read().strip())
        except (ValueError, OSError):
            # Unreadable or garbled progress file: restart from the beginning.
            return 1
    return 1

def save_progress(page):
    """Persist the crawl progress marker (a volume number) to PROGRESS_FILE."""
    progress_text = str(page)
    with open(PROGRESS_FILE, mode='w') as progress_file:
        progress_file.write(progress_text)

def get_papers_by_issue(volume_range=(70, 71), issue_range=(1, 12)):
    """Crawl IEEE Xplore paper metadata by journal volume and issue.

    Walks volumes from volume_range[1] down to volume_range[0] (inclusive)
    and, within each volume, issues issue_range[0]..issue_range[1]. Every
    result page is appended to the CSV via save_to_csv(), and per-volume
    progress is persisted so an interrupted run can resume.

    Args:
        volume_range: (lowest, highest) volume numbers, inclusive.
        issue_range: (lowest, highest) issue numbers, inclusive.
    """
    session = requests.Session()
    total_papers = 0
    MAX_RETRIES = 3  # cap per-page retries so transient failures cannot loop forever

    try:
        # Resume only when a progress file actually exists; a fresh run starts
        # from the highest volume. (load_progress() defaults to 1, which would
        # wrongly skip every volume given the descending iteration below.)
        if os.path.exists(PROGRESS_FILE):
            start_volume = load_progress()
            print(f"从第 {start_volume} 卷开始继续爬取...")
        else:
            start_volume = volume_range[1]

        for volume in range(volume_range[1], volume_range[0] - 1, -1):
            # Volumes *above* the saved progress were already finished in a
            # previous run (we iterate high -> low), so skip those on resume.
            # The original `volume < last_volume` test skipped the unprocessed
            # volumes instead.
            if volume > start_volume:
                continue

            for issue in range(issue_range[0], issue_range[1] + 1):
                print(f"\n开始获取第 {volume} 卷第 {issue} 期的论文...")

                # Boolean query pinning the journal, volume, and issue.
                query = (
                    '("Publication Title":"IEEE Transactions on Communications") '
                    f'AND ("Volume":{volume}) AND ("Issue":{issue})'
                )
                SEARCH_PARAMS["queryText"] = query

                page = 1
                retries = 0
                while True:
                    print(f"正在获取第 {page} 页...")
                    # Keep the payload in sync with the page counter. The
                    # original code left pageNumber at 1, so every iteration
                    # re-fetched the first page.
                    SEARCH_PARAMS["pageNumber"] = page

                    try:
                        response = session.post(
                            API_URL,
                            json=SEARCH_PARAMS,
                            headers=HEADERS,
                            timeout=30
                        )

                        if response.status_code == 200:
                            retries = 0  # a successful round-trip resets the retry budget
                            data = response.json()
                            records = data.get("records", [])

                            if not records:
                                print("没有更多数据")
                                break

                            print(f"获取到 {len(records)} 条记录")

                            # Flatten each record into a CSV-friendly dict.
                            # `or` guards cover fields the API may return as null.
                            page_results = []
                            for record in records:
                                paper_info = {
                                    "volume": volume,
                                    "issue": issue,
                                    "title": (record.get("articleTitle") or "").strip(),
                                    "authors": "; ".join([author.get("preferredName", "") for author in record.get("authors") or []]),
                                    "year": record.get("publicationYear", ""),
                                    "doi": record.get("doi", ""),
                                    "abstract": (record.get("abstract") or "").replace("\n", " "),
                                    "keywords": "; ".join(record.get("keywords") or []),
                                    "article_number": record.get("articleNumber", ""),
                                    "start_page": record.get("startPage", ""),
                                    "end_page": record.get("endPage", ""),
                                    "publication_date": record.get("publicationDate", "")
                                }
                                page_results.append(paper_info)

                            if page_results:
                                save_to_csv(page_results)
                                total_papers += len(page_results)
                                print(f"已保存 {total_papers} 条记录")

                            # Stop when the server reports no further pages.
                            if page >= data.get("totalPages", 0):
                                break

                            page += 1
                            # Randomized politeness delay to avoid rate limiting.
                            time.sleep(2 + random.random() * 3)

                        else:
                            print(f"请求失败: {response.status_code}")
                            if response.status_code == 429:  # Too Many Requests
                                retries += 1
                                if retries > MAX_RETRIES:
                                    break  # rate-limited repeatedly: give up on this issue
                                time.sleep(60)
                                continue
                            break

                    except Exception as e:
                        print(f"处理数据时出错: {str(e)}")
                        retries += 1
                        if retries > MAX_RETRIES:
                            # Bounded retries: the original unconditional
                            # `continue` could spin on the same page forever.
                            break
                        time.sleep(30)
                        continue

                # Persist progress after each finished issue.
                save_progress(volume)
                print(f"进度已保存：第 {volume} 卷第 {issue} 期的爬取")

                time.sleep(5)

    except Exception as e:
        print(f"爬取失败: {str(e)}")
        traceback.print_exc()

    print(f"\n爬取完成，共获取 {total_papers} 条记录")

if __name__ == "__main__":
    # Set the volume and issue ranges to crawl
    get_papers_by_issue(volume_range=(20, 73), issue_range=(1, 12))