"""
TODO: 多线程/多进程，同时爬取多页
"""

import requests
import os
import sys
import re
import multiprocessing
import tools.multiprocesses as mp
from bs4 import BeautifulSoup
from utils import charset_finder

sys.path.append("../有道翻译/direct.py")
from 有道翻译.direct import get_translate_text


def generate_search_sentence(mode=0):
    """Read comma-separated search words from stdin and build query strings.

    Args:
        mode: 0 — each input word becomes its own search string, used
            verbatim; non-zero — every word is first translated via
            ``get_translate_text(word, 2)`` and all words are joined into a
            single ``'w1 AND w2 AND ...'`` query.

    Returns:
        list[str]: the search query strings (length 1 when ``mode != 0``).
    """
    # Split on both the ASCII comma and the full-width comma (U+FF0C),
    # since users may type either.
    words = re.split(r'[,\uff0c]',
                     input('请输入搜索语句  搜索词1,搜索词2,...（输在一行，每个词用逗号隔开）  \n: '))
    if mode == 0:
        return words
    translated = [get_translate_text(word, 2) for word in words]
    search_key_str = [' AND '.join(translated)]
    print(f'{search_key_str = }')
    return search_key_str


def get_paper(keyword, page_start, page_end, cookies, headers, base_url,
              out_dir='D:/SpringerLink/'):
    """Scrape SpringerLink search results for *keyword*, translate the
    titles/abstracts, and save everything to an Excel file.

    Args:
        keyword: the search query string sent to SpringerLink.
        page_start: first result page to fetch (inclusive, 1-based).
        page_end: last result page to fetch (inclusive).
        cookies: cookie dict passed straight to ``requests.get``.
        headers: header dict passed straight to ``requests.get``.
        base_url: prefix used to absolutize relative article links.
        out_dir: directory the ``.xlsx`` file is written to (the default
            preserves the previously hard-coded path).

    Raises:
        ValueError: if the scraped columns get out of sync, or if a
            translation call fails (original cause is chained).
    """
    link_list = []
    title_list = []
    abstract_list = []
    time_list = []
    for page in range(page_start, page_end + 1):
        params = {
            'new-search': 'true',
            'query': keyword,
            'sortBy': 'relevance',
            'page': str(page),
        }
        response = requests.get('https://link.springer.com/search',
                                params=params, cookies=cookies, headers=headers)
        # First parse is only used to detect the charset; then re-decode the
        # raw bytes with it so non-ASCII text is preserved.
        soup = BeautifulSoup(response.text, 'html.parser')
        charset = charset_finder(soup)
        soup = BeautifulSoup(response.content.decode(charset), 'html.parser')

        ol = soup.find('ol', class_='u-list-reset')
        for li in ol.find_all('li'):
            time_list.append(
                li.find('span', class_='c-meta__item',
                        attrs={'data-test': "published"}).text)
            link = li.find('a')['href']
            link_list.append(link if link.startswith('http') else base_url + link)
            title_list.append(li.find('span').text)
            # The abstract lives in a <p> inside the intro <div>; fall back to
            # the <div>'s own text, then to '' so the columns stay aligned.
            div = li.find('div', class_='app-listing__intro')
            p = div.find('p') if div is not None else None
            if p is not None:
                abstract_list.append(p.text)
            elif div is not None:
                abstract_list.append(div.text)
            else:
                abstract_list.append('')
            if not (len(link_list) == len(title_list) == len(abstract_list)):
                raise ValueError('数据长度不一致')
        print(f'第{page}页数据获取完成', multiprocessing.current_process().name)

    print('**********************数据翻译中**********************')
    for i in range(len(title_list)):
        try:
            title_list[i] = get_translate_text(title_list[i], 1)
            abstract_list[i] = get_translate_text(abstract_list[i], 1)
        except Exception as exc:
            # Chain the original error instead of swallowing it.
            raise ValueError('!!!!!!!!!!!!!!!!!!!!!翻译失败!!!!!!!!!!!!!!!!!!!!!!!') from exc
    print("**********************数据翻译完成***********************")

    # Local import keeps module import time low when only scraping is reused.
    import pandas as pd

    data = pd.DataFrame({'title': title_list, 'time': time_list,
                         'abstract': abstract_list, 'link': link_list})
    data.to_excel(os.path.join(out_dir,
                               f'{keyword}-{page_start} to {page_end}.xlsx'),
                  index=False)
    print('数据保存成功')


# --- Interactive setup (this file is run as a script) -----------------
folder_path = 'D:/SpringerLink/'
if not os.path.exists(folder_path):
    os.makedirs(folder_path)

# 'y' switches to Chinese-input mode: words are translated before searching.
choice = input("是否切换中文搜索模式（y/n）：")
mode = 1 if choice.lower() == 'y' else 0

key_str = generate_search_sentence(mode)
# Start/end result pages, split on either an ASCII or a full-width comma.
page_range = input('请输入起始页和结束页（用逗号间隔）：')
page_start, page_end = (int(part) for part in re.split(r'[,\uff0c]', page_range))
base_url = 'https://link.springer.com'  # used to absolutize relative article links

# NOTE(review): hard-coded session cookies copied from a real browser session.
# They identify one user and will expire; they should not be committed to
# version control. Public search pages presumably work without them — verify.
cookies = {
    'idp_marker': '6c169e13-7d45-4fb7-b2d4-3eb38c6139bb',
    'user.uuid.v2': '"a88c77f9-f4be-42ab-9070-34539b247e05"',
    'sim-inst-token': '""',
    'idp_session': 'sVERSION_1ffc3238b-7287-46f4-b5e7-902f1ea5afe9',
    'idp_session_http': 'hVERSION_1ecc87374-de94-4f88-bf72-40a7f1302e97',
    'sncc': 'P%3D17%3AV%3D46.0.0%26C%3DC01%2CC02%2CC03%2CC04%26D%3Dtrue',
    '_gid': 'GA1.2.1269923335.1717084599',
    'lantern': 'f18ed749-0b45-4e03-bc12-76ab03e03930',
    'trackid': '"i3ex7vfj7rlo3mx7ddz7jvh8a"',
    '_hjSession_3590378': 'eyJpZCI6ImJjNDEzNTllLWE5MWYtNGMyYy1iYmE5LWI4MzdiNWRmODg4YiIsImMiOjE3MTcwODQ2NjczMDUsInMiOjAsInIiOjAsInNiIjowLCJzciI6MCwic2UiOjAsImZzIjoxLCJzcCI6MX0=',
    '_hjSessionUser_3590378': 'eyJpZCI6Ijg2ZTJlOThmLWY0MDItNTBjNC04MTA0LWFmOWI0ZjAzOTc0YyIsImNyZWF0ZWQiOjE3MTcwODQ2NjczMDUsImV4aXN0aW5nIjp0cnVlfQ==',
    'Hm_lvt_e1214cdac378990dc262ce2bc824c85a': '1717084682',
    'permutive-id': 'e8c534b7-15e2-4b46-993c-57117a5745a2',
    '_uetsid': '33f916f01e9d11ef8297dd3d8cf0ff3c',
    '_uetvid': '33f93d301e9d11ef9190b16b121fe905',
    'Hm_lpvt_e1214cdac378990dc262ce2bc824c85a': '1717084710',
    '_ga': 'GA1.2.402844308.1717084598',
    'amp_72dea4': 'LElu84wEhnmnUVbYtQ0R-X...1hv53fqou.1hv53itvt.4.0.4',
    'permutive-session': '%7B%22session_id%22%3A%22bc12f3b1-66a9-48fc-9c58-73be7313e166%22%2C%22last_updated%22%3A%222024-05-30T15%3A58%3A35.309Z%22%7D',
    '_ga_B3E4QL2TPR': 'GS1.1.1717084598.1.1.1717084731.60.0.0',
}
# Browser-mimicking request headers (Edge 125 on Windows) so the scrape looks
# like a normal page visit. The raw 'cookie' header stays commented out
# because cookies are passed separately via the ``cookies=`` argument above.
headers = {
    'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
    'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
    'cache-control': 'no-cache',
    # 'cookie': 'idp_marker=6c169e13-7d45-4fb7-b2d4-3eb38c6139bb; user.uuid.v2="a88c77f9-f4be-42ab-9070-34539b247e05"; sim-inst-token=""; idp_session=sVERSION_1ffc3238b-7287-46f4-b5e7-902f1ea5afe9; idp_session_http=hVERSION_1ecc87374-de94-4f88-bf72-40a7f1302e97; sncc=P%3D17%3AV%3D46.0.0%26C%3DC01%2CC02%2CC03%2CC04%26D%3Dtrue; _gid=GA1.2.1269923335.1717084599; lantern=f18ed749-0b45-4e03-bc12-76ab03e03930; trackid="i3ex7vfj7rlo3mx7ddz7jvh8a"; _hjSession_3590378=eyJpZCI6ImJjNDEzNTllLWE5MWYtNGMyYy1iYmE5LWI4MzdiNWRmODg4YiIsImMiOjE3MTcwODQ2NjczMDUsInMiOjAsInIiOjAsInNiIjowLCJzciI6MCwic2UiOjAsImZzIjoxLCJzcCI6MX0=; _hjSessionUser_3590378=eyJpZCI6Ijg2ZTJlOThmLWY0MDItNTBjNC04MTA0LWFmOWI0ZjAzOTc0YyIsImNyZWF0ZWQiOjE3MTcwODQ2NjczMDUsImV4aXN0aW5nIjp0cnVlfQ==; Hm_lvt_e1214cdac378990dc262ce2bc824c85a=1717084682; permutive-id=e8c534b7-15e2-4b46-993c-57117a5745a2; _uetsid=33f916f01e9d11ef8297dd3d8cf0ff3c; _uetvid=33f93d301e9d11ef9190b16b121fe905; Hm_lpvt_e1214cdac378990dc262ce2bc824c85a=1717084710; _ga=GA1.2.402844308.1717084598; amp_72dea4=LElu84wEhnmnUVbYtQ0R-X...1hv53fqou.1hv53itvt.4.0.4; permutive-session=%7B%22session_id%22%3A%22bc12f3b1-66a9-48fc-9c58-73be7313e166%22%2C%22last_updated%22%3A%222024-05-30T15%3A58%3A35.309Z%22%7D; _gat=1; _ga_B3E4QL2TPR=GS1.1.1717084598.1.1.1717084731.60.0.0',
    'pragma': 'no-cache',
    'priority': 'u=0, i',
    'referer': 'https://link.springer.com/search?new-search=true&query=deep+learning&dateFrom=&dateTo=&sortBy=relevance',
    'sec-ch-ua': '"Microsoft Edge";v="125", "Chromium";v="125", "Not.A/Brand";v="24"',
    'sec-ch-ua-mobile': '?0',
    'sec-ch-ua-platform': '"Windows"',
    'sec-fetch-dest': 'document',
    'sec-fetch-mode': 'navigate',
    'sec-fetch-site': 'same-origin',
    'sec-fetch-user': '?1',
    'upgrade-insecure-requests': '1',
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36 Edg/125.0.0.0',
}
# Non-multiprocess version (the TODO at the top of the file still applies).
# The 5-worker process pool the original created here was never given any
# tasks and only leaked worker processes, so it is no longer created.
#
# ``get_paper`` expects a single query string, but
# ``generate_search_sentence`` returns a *list* of query strings; passing the
# list straight through made requests encode it as repeated ``query=``
# parameters. Run one scrape per query instead.
for search_key in key_str:
    get_paper(search_key, page_start, page_end, cookies, headers, base_url)
"""
中文 分割 深度学习，中文 分割 神经网络，情感分析 中文 神经网络，情感分析 中文 深度学习，中文 变形体 深度学习，中文 变形体 神经网络
deep learning AND Natural Language Processing,deep learning AND tokenization,Tokenization AND deep learning,Seq2Seq AND deep learning AND Word Segmentation,Word Segmentation AND deep learning
"""
