"""Fetch up to 100 New York Times Article Search results for a query
and save them to a CSV file.

Requires a valid NYT API key (https://developer.nytimes.com/).
"""
import datetime  # was missing in the original: datetime.datetime.now() raised NameError
import time

import pandas as pd
import requests

api_key = 'YOUR_API_KEY'
query = 'domestic violence'
begin_date = '20220101'
end_date = '20241231'
page = 0
max_articles = 100  # stop after collecting this many articles
total_articles = 0

all_articles = []

while total_articles < max_articles:
    # Let requests build and percent-encode the query string; interpolating
    # the raw query (which contains a space) into the URL is fragile.
    params = {
        'q': query,
        'begin_date': begin_date,
        'end_date': end_date,
        'page': page,
        'api-key': api_key,
    }
    response = requests.get(
        'https://api.nytimes.com/svc/search/v2/articlesearch.json',
        params=params,
        timeout=30,  # don't hang forever on a stalled connection
    )
    response.raise_for_status()  # fail loudly on HTTP errors (401 bad key, 429 rate limit, ...)
    data = response.json()
    articles = data.get('response', {}).get('docs', [])
    if not articles:
        break  # no more results for this query/date range
    for article in articles:
        title = article.get('headline', {}).get('main', '')
        date = article.get('pub_date', '')
        author = ', '.join([byline.get('original', '') for byline in article.get('byline', {}).get('person', [])])
        section = article.get('section_name', '')
        # Full article text is not available via this API; combine the two
        # summary fields the response does provide.
        content = article.get('lead_paragraph', '') + '\n' + article.get('abstract', '')
        keywords = [keyword['value'] for keyword in article.get('keywords', []) if keyword.get('value')]
        all_articles.append({
            'title': title,
            'date': date,
            'author': author,
            'section': section,
            'content': content,
            'keywords': keywords,
            'source': 'The New York Times',
            'retrieval_timestamp': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        })
        total_articles += 1
        if total_articles >= max_articles:
            break
    page += 1
    if total_articles < max_articles:
        # NYT rate-limits the Article Search API to a handful of requests
        # per minute; pause between pages to avoid 429 responses.
        time.sleep(12)

# Save collected articles as CSV.
df = pd.DataFrame(all_articles)
df.to_csv('nytimes_domestic_violence_articles.csv', index=False, encoding='utf-8')
