import logging
import random
import requests
import json
import time
import pandas as pd
from os import makedirs
from os.path import exists

logging.basicConfig(level=logging.INFO, format='[%(asctime)s] - %(levelname)s: %(message)s')


# Endpoint of the NSFC (National Natural Science Foundation of China)
# "completed project" query API; scrape_api POSTs JSON payloads to it.
INDEX_URL = 'https://kd.nsfc.gov.cn/api/baseQuery/completionQueryResultsData'

# Pool of desktop-browser User-Agent strings; scrape_api picks one at random
# per request, presumably to reduce the chance of being rate-limited/blocked.
user_agent = ["Mozilla/5.0 (Windows NT 10.0; WOW64)", 'Mozilla/5.0 (Windows NT 6.3; WOW64)',
              'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
              'Mozilla/5.0 (Windows NT 6.3; WOW64; Trident/7.0; rv:11.0) like Gecko',
              'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1500.95 Safari/537.36',
              'Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET '
              'CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; rv:11.0) like Gecko)',
              'Mozilla/5.0 (Windows; U; Windows NT 5.2) Gecko/2008070208 Firefox/3.0.1',
              'Mozilla/5.0 (Windows; U; Windows NT 5.1) Gecko/20070309 Firefox/2.0.0.3',
              'Mozilla/5.0 (Windows; U; Windows NT 5.1) Gecko/20070803 Firefox/1.5.0.12',
              'Opera/9.27 (Windows NT 5.2; U; zh-cn)',
              'Mozilla/5.0 (Macintosh; PPC Mac OS X; U; en) Opera 8.0',
              'Opera/8.0 (Macintosh; PPC Mac OS X; U; en)',
              'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.12) Gecko/20080219 Firefox/2.0.0.12 '
              'Navigator/9.0.0.6',
              'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; Win64; x64; Trident/4.0)',
              'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; Trident/4.0)',
              'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0; SLCC2; .NET CLR 2.0.50727; '
              '.NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; InfoPath.2; .NET4.0C; .NET4.0E)',
              'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Maxthon/4.0.6.2000 '
              'Chrome/26.0.1410.43 Safari/537.1 ',
              'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0; SLCC2; .NET CLR 2.0.50727; '
              '.NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; InfoPath.2; .NET4.0C; .NET4.0E; '
              'QQBrowser/7.3.9825.400)',
              'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:21.0) Gecko/20100101 Firefox/21.0 ',
              'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.92 '
              'Safari/537.1 LBBROWSER',
              'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0; BIDUBrowser 2.x)',
              'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.11 '
              'TaoBrowser/3.0 Safari/536.11']

# Accumulator for scraped project records; save_data() appends rows to it
# and main() writes it out to an Excel file at the end of the run.
df = pd.DataFrame()
# Directory that the Excel output is written into; created up front.
RESULTS_DIR = 'data'
# exist_ok=True replaces the check-then-create `exists(...) or makedirs(...)`
# idiom, which is race-prone (TOCTOU) and harder to read.
makedirs(RESULTS_DIR, exist_ok=True)

def scrape_api(url, keyword, page):
    """POST one search-page query to the NSFC API.

    Args:
        url: API endpoint (normally INDEX_URL).
        keyword: fuzzy search keyword sent in the payload.
        page: zero-based page number to request.

    Returns:
        The `requests.Response` on HTTP 200, otherwise None (non-200 status
        or any `requests.RequestException`; both are logged).
    """
    logging.info('*' * 50)
    logging.info('scraping %s: 第 %d 页...', keyword, page)
    payloadData = {
        "complete": "true",
        "conclusionYear": "近五年",
        "fuzzyKeyword": keyword,
        "isFuzzySearch": "true",
        "order": "enddate",
        "ordering": "desc",
        "pageNum": page,
        "pageSize": 10  # API caps the page size at 10
    }
    headers = {
        "User-Agent": random.choice(user_agent),
        "Content-Type": "application/json"
    }
    try:
        # timeout prevents the scraper from hanging forever on a stalled
        # connection; a timeout raises requests.RequestException (caught below).
        response = requests.post(url, data=json.dumps(payloadData),
                                 headers=headers, timeout=30)
        if response.status_code == 200:
            return response
        logging.error('get invalid status code %s while scraping %s: 第 %d 页',
                      response.status_code, keyword, page)
    except requests.RequestException:
        # Lazy %-args instead of eager `%` formatting inside the logging call.
        logging.error('❌error occurred while scraping %s: 第 %d 页',
                      keyword, page, exc_info=True)

def parse_api(resultsData):
    """Yield one project dict per raw result row from the NSFC API.

    Each row is a positional list; the field positions below mirror the
    API's response layout (negative indices count from the row's end).
    """
    for row in resultsData:
        project = {
            'id': row[0],              # project id
            'name': row[1],            # project title
            'GrantNo': row[2],         # grant approval number
            'category': row[3],        # project category
            'SupportingInst': row[4],  # supporting institution
            'leader': row[5],          # principal investigator
            'funds': float(row[6]),    # funding amount (unit: 10k CNY)
            'start': row[7],           # approval year
            'end': row[-4],            # conclusion year
            'code': row[-5],           # application code
            'keywords': row[8],        # keywords
        }
        # Semicolon-separated counts: journal papers, conference papers,
        # monographs, awards, patents — exactly five values expected.
        counts = [int(n) for n in row[10].split(';')]
        (project['journalPapers'], project['conferencePapers'],
         project['monographs'], project['awards'],
         project['patents']) = counts
        yield project


def save_data(data):
    """Append one project record (a dict) to the module-level DataFrame.

    Args:
        data: mapping of column name -> value for a single project row.

    Note: DataFrame.append was deprecated in pandas 1.4 and removed in 2.0,
    so the record is appended via pd.concat instead.
    """
    global df
    logging.info('saving data %s', data)
    df = pd.concat([df, pd.DataFrame([data])], ignore_index=True)
    

def main():
    """Scrape every result page for one keyword and export them to Excel.

    Stops when the API returns an empty result page or a request fails;
    whatever was collected so far is still written to disk.
    """
    keyword = "芯片"
    pageNum = 0
    while True:
        response = scrape_api(INDEX_URL, keyword, pageNum)
        time.sleep(1)  # throttle between requests
        if response is None:
            # scrape_api returns None on failure (and has already logged it);
            # calling .json() on None would crash here.
            logging.error('request for page %d failed, stopping', pageNum)
            break
        index_data = response.json()
        # Guard against a missing/None 'data' or 'resultsData' field instead
        # of chaining .get() calls that would raise on None.
        results = (index_data.get('data') or {}).get('resultsData') or []
        if not results:
            logging.warning('⚠ 已经到最后一页！')
            break
        for project in parse_api(results):
            project['searchWord'] = keyword
            save_data(project)
        pageNum += 1
    # Save everything collected so far to an Excel file. The `encoding`
    # keyword was removed from to_excel in modern pandas, so it is not passed.
    data_path = f'{RESULTS_DIR}/{keyword}.xlsx'
    df.to_excel(data_path, index=False)


if __name__ == '__main__':
    main()
