# This is a sample Python script.

# Press Shift+F10 to execute it or replace it with your code.
# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.
import csv
import os
import re
import sys
import time

import db.mysql

from PyQt5.QtWidgets import QApplication, QWidget, QMainWindow
from bs4 import BeautifulSoup
import lxml
from django.forms import Widget

import my_util

import requests
from requests import request

from entity.journal import Journal
from global_config import CSV_TYPE_LINK, CSV_ARCHIVE_LINK, BASE_URL, CSV_ARCHIVE_DETAIL_LIST, TYPE_LINK
from gui import win_start


def type_get(win, url: str) -> list:
    """Scrape the journal-type index page and persist link/title/number rows.

    Fetches `url` via ``my_util.sn_get``, extracts every ``<a>`` entry with
    its trailing count, writes the rows to the CSV at ``CSV_TYPE_LINK``, and
    echoes progress to both stdout and the GUI log.

    Args:
        win: GUI window object exposing ``show_log`` for progress messages.
        url: Absolute URL of the type index page.

    Returns:
        List of dicts with keys ``'link'``, ``'title'``, ``'number'``.
    """
    head = ['link', 'title', 'number']
    page = my_util.sn_get(win, url=url)
    pattern = re.compile(r'href="./(.*?)">(.*?)</a>(.*?)</td>')
    # Each regex match is a (link, title, number) tuple aligned with `head`.
    rows = [dict(zip(head, match)) for match in pattern.findall(page)]
    for i, row in enumerate(rows, start=1):
        msg = f"{i},{row['title']}"
        print(msg)
        win.show_log(msg)
    my_util.csv_writerows(head, rows, CSV_TYPE_LINK)
    return rows


def get_article(win, url: str, page: int) -> list:
    """Crawl `page` listing pages of the journal archive and collect links.

    For each listing page, extracts (link, title) pairs for journal detail
    pages, writes them to ``CSV_ARCHIVE_LINK``, then kicks off
    :func:`get_article_detail` to scrape each detail page.

    Args:
        win: GUI window object exposing ``show_log`` for progress messages.
        url: Base listing URL (page number is appended as a query parameter).
        page: Number of listing pages to crawl.

    Returns:
        List of dicts with keys ``'link'`` and ``'title'``.
    """
    head = ['link', 'title']
    rows = []
    # Compile once outside the loop instead of per page.
    pattern = re.compile(r'href="./(index.php\?journalid=.*detail).*>(.*)</a>')
    for page_index in range(page):
        link = f"{url}&currentpage={page_index + 1}#journallisttable"
        try:
            html = my_util.sn_get(win, url=link)
            for i, (href, title) in enumerate(pattern.findall(html)):
                rows.append({head[0]: href, head[1]: title})
                # Serial = page index followed by 1..9 then 0 for the 10th row.
                serial = str(page_index) + str((i + 1) if (i + 1) != 10 else 0)
                msg = f"{serial},{href},{title}"
                print(msg)
                win.show_log(msg)
        except Exception as e:
            # Report the failure instead of silently dropping the page.
            win.show_log(f"page {page_index + 1} failed: {e}")
        time.sleep(5)  # throttle requests (original slept on both paths)
    my_util.csv_writerows(head, rows, CSV_ARCHIVE_LINK)
    get_article_detail(win)  # scrape per-journal detail pages
    return rows


def get_article_detail(win):
    """Scrape every archived journal's detail page; save to CSV and database.

    Reads the link list previously written to ``CSV_ARCHIVE_LINK``, fetches
    each detail page, extracts name/keyword/score/ISSN/E-ISSN/latest-IF/
    introduction/research-direction fields, writes all rows to
    ``CSV_ARCHIVE_DETAIL_LIST``, and stores rows that have an ISSN into the
    database via ``Journal.create``.

    Args:
        win: GUI window object exposing ``show_log`` for progress messages.
    """
    links = my_util.csv_get_data(CSV_ARCHIVE_LINK)
    head = ["name", "keyword", 'score', "ISSN", "E-ISSN", "new_if", "introduce", "link", "direction"]
    # Compile once outside the loop instead of per page.
    name_pattern = re.compile(r'期刊名字.*">(.*)</a>.*>(.*)</font>')
    dic_list = []
    for row in links:
        try:
            link = BASE_URL + row[0]
            html = my_util.sn_get(win, url=link)
            soup = BeautifulSoup(html, 'lxml')
            # The detail table holds all fields; flatten its cells to one
            # string so the name/keyword regex can span cell boundaries.
            td_str = "".join(map(str, soup.select(".table_yjfx>tbody>tr>td")))
            names = name_pattern.findall(td_str)
            name = names[0][0] if names else None
            keyword = names[0][1] if names else None

            # Locate each labelled node once instead of searching twice
            # (the original repeated every soup.find for its None-guard).
            issn_node = soup.find(text=re.compile(r'期刊ISSN'))
            issn = issn_node.find_next().text if issn_node is not None else None
            e_issn_node = soup.find(text=re.compile(r'.*E-ISSN.*'))
            e_issn = e_issn_node.find_next().text if e_issn_node is not None else None
            if_node = soup.find(text=re.compile('.*最新IF.*'))
            new_if = (if_node.find_parent().find_next_sibling().find(text=True).strip()
                      if if_node is not None else None)
            score_node = soup.find(text=re.compile(".*LetPub评分.*"))
            score = (score_node.find_parent().find_next_sibling().find("div").text
                     if score_node is not None else None)
            # These two raise AttributeError when missing, which skips the
            # record via the except below — same behavior as the original.
            introduce = soup.find(text=re.compile('.*期刊简介.*')).find_parent().find_next_sibling().text
            direction = soup.find(text=re.compile(".*涉及的研究方向.*")).find_parent().find_next_sibling().text

            dic_list.append(dict(zip(head, (name, keyword, score, issn, e_issn,
                                            new_if, introduce, link, direction))))
            print(issn)
            # Persist to the database only when an ISSN was found.
            if issn is not None:
                Journal.create(name=name, keyword=keyword, score=score, issn=issn, e_issn=e_issn, new_if=new_if,
                               introduce=introduce, link=link, direction=direction)
                win.show_log("记录已存入数据库")
        except Exception as e:
            # Report the failure instead of silently dropping the record.
            win.show_log(f"detail page failed: {e}")
        time.sleep(6)  # throttle requests (original slept on both paths)
    my_util.csv_writerows(head, dic_list, CSV_ARCHIVE_DETAIL_LIST)
    win.show_log("爬取完毕！")


if __name__ == '__main__':
    # Ad-hoc smoke test: fetch one journal detail page and print its
    # "research directions" section.
    detail_url = BASE_URL + "index.php?journalid=3&page=journalapp&view=detail"
    page_html = requests.get(detail_url).text
    parsed = BeautifulSoup(page_html, 'lxml')
    label_node = parsed.find(text=re.compile(".*涉及的研究方向.*"))
    section_text = label_node.find_parent().find_next_sibling().text
    print(section_text)