import requests
from bs4 import BeautifulSoup
import matplotlib.pyplot as plt


class Txt:
    """Per-article result store: three parallel lists where index i
    describes one scraped article (its title, download link, and year)."""

    def __init__(self):
        # All three start empty and are filled side by side.
        self.title, self.download_add, self.year = [], [], []


class Year_count:
    """Per-year statistics store: parallel lists of year labels and the
    number of articles counted for each label."""

    def __init__(self):
        # Filled side by side: year_name[i] pairs with year_num[i].
        self.year_name, self.year_num = [], []


class Likely_Match:
    """Candidate store for dblp 'Likely matches': parallel lists of each
    candidate's display name, page address, and brief introduction."""

    def __init__(self):
        # index i across the three lists describes one candidate.
        self.likely_name, self.likely_adress, self.likely_bri_intro = [], [], []


# Global state, part 1: likely-match bookkeeping
end3_lmatch = Likely_Match()
flag_end3 = 0  # marks whether the end3 store was built/filled with all candidate targets
url = None  # address of the currently selected target author page
Bri_intro = None  # brief intro text of the currently selected target
# Global state, part 2: scraped output stores
outc1 = Txt()  # per-article results
outc2 = Year_count()  # per-year article counts


# 重置全局变量函数
# Reset function for the module-level state
def reset():
    """Clear all module-level scrape state so a fresh search starts clean.

    Empties the ``end3_lmatch``, ``outc1`` and ``outc2`` stores and resets
    the scalar globals ``flag_end3``, ``url`` and ``Bri_intro``.
    """
    # BUG FIX: without this declaration the three scalar assignments below
    # created dead *locals*, leaving the real globals unreset between runs.
    global flag_end3, url, Bri_intro
    end3_lmatch.likely_adress = []
    end3_lmatch.likely_bri_intro = []
    end3_lmatch.likely_name = []
    flag_end3 = 0  # marks whether the end3 store was filled with candidate targets
    url = None
    Bri_intro = None
    outc1.download_add = []
    outc1.title = []
    outc1.year = []
    outc2.year_name = []
    outc2.year_num = []


def name_match(name):
    """Search dblp.org for *name* and resolve the best-matching author page.

    Fetches https://dblp.org/search?q=<name> and scans the result sections:
    inside 'Exact matches' it prefers the entry whose id suffix is '0001',
    falling back to the physically first exact match; if only 'Likely
    matches' exist, every candidate is appended to the module-level
    ``end3_lmatch`` store and ``flag_end3`` is set.

    Returns [url, Bri_intro, flag_end3]; url/Bri_intro stay None when no
    exact match was resolved.
    """
    global flag_end3
    global url
    global Bri_intro
    flag_exact = 0  # 0 = initial state, 1 = walking the exact-match section, 2 = walking the likely-match section
    flag_0001 = 0
    url_1 = 'https://dblp.org/search?q='
    url_2 = url_1 + name  # join the search-URL prefix with the target name
    # Start scraping the target information
    strHtml = requests.get(url_2)  # GET the page data
    html = strHtml.text
    bf = BeautifulSoup(html, "html.parser")
    # Check whether the results contain an exact match; if so, search further
    div = bf.find_all('div', class_='body hide-body')
    for match in div[1]:
        if match.text:  # skips empty matches; in practice skips the div class=metadata case
            if match.text == 'Exact matches':
                flag_exact = 1
            # Inside the exact-match section, look for the entry whose id sequence is 0001
            elif match.text != 'Likely matches' and flag_exact == 1:
                li = match.find_all('li')  # each li item under match is one candidate target
                for li_part in li:  # iterate all li items (all retrieved targets)
                    if '0001' in li_part.text:  # found the entry with id sequence 0001
                        flag_0001 = 1
                        a = li_part.find_all('a')
                        url = a[0].get("href")  # address of the id-0001 target
                        Bri_intro = li_part.text.replace('0001', ',id=0001,  ')  # brief intro of the id-0001 target
                        break
                if flag_0001 == 1:  # url and Bri_intro for id 0001 are found, leave the outer loop too
                    break
                # After walking the result-list block, no exact-match entry with id 0001 exists
                if flag_0001 == 0:
                    # Take the address/info of the physically first target (an exact-match section always has at least one)
                    firs = match.find_all('a')
                    url = firs[0].get("href")  # address of the first target on the page
                    Bri_intro = firs[0].text  # info of the first matching target
                    break  # url and Bri_intro found, leave the big loop
            # When any Exact matches exist, control never reaches here
            elif match.text == 'Likely matches' and flag_exact == 0:
                flag_exact = 2
            elif match.text != 'Likely matches' and flag_exact == 2:
                li = match.find_all('li')  # each li item under match is one possible target
                for li_part in li:  # iterate all li items
                    a = li_part.find_all('a')
                    small = li_part.find_all('small')
                    end3_lmatch.likely_bri_intro.append(small[0].text)
                    end3_lmatch.likely_name.append(a[0].text)
                    end3_lmatch.likely_adress.append(a[0].get("href"))
                    flag_end3 = 1

    return [url, Bri_intro, flag_end3]


def end3_dispose(choose_num):
    """Select likely-match candidate #choose_num as the active target.

    Copies the candidate's address into the module-level ``url`` and its
    brief intro (falling back to its name when the intro is empty) into
    ``Bri_intro``, then returns [url, Bri_intro].
    """
    global url
    global Bri_intro
    url = end3_lmatch.likely_adress[choose_num]
    chosen_intro = end3_lmatch.likely_bri_intro[choose_num]
    # An empty intro falls back to the candidate's display name.
    Bri_intro = chosen_intro if chosen_intro else end3_lmatch.likely_name[choose_num]
    return [url, Bri_intro]


def information_get(url):
    """Scrape the author page at *url* and fill the global output stores.

    Walks the page's publication lists: article rows append their title,
    download link and current year to ``outc1``; year-header rows update
    the running year and push a (year, count) pair into ``outc2``.
    """
    global outc1
    global outc2
    year = -1  # sentinel until the first year header is seen
    num_flag = 0  # running count of articles under the current year header
    # With the target's address fixed, enter its page and fetch the data
    strHtml2 = requests.get(url)
    html2 = strHtml2.text
    bf2 = BeautifulSoup(html2, "html.parser")
    publ = bf2.find_all('ul', class_='publ-list')  # note: there may be more than one publ-list, split by decade
    # Walk through the article blocks
    for toc in publ:  # inside each per-decade group
        for i, toc_block in enumerate(toc):  # toc_block is each item in this decade (either a year header or an article block)
            # Locate the title part
            title = toc_block.find_all('span', class_='title')
            if title:  # must handle the empty case (year headers); an empty list is falsy
                # get this article's title
                outc1.title.append(title[0].text)
                # get this article's download address (the page holds the article details)
                download_add_1 = toc_block.find_all('div', class_='head')
                outc1.download_add.append(download_add_1[1].find_all('a')[0].get("href"))  # [1] because the download tag is the second one
                # record this article's year
                outc1.year.append(year)
                num_flag = num_flag + 1
            else:
                if num_flag:
                    outc2.year_num.append(num_flag)  # add the year-count statistic
                year = toc_block.text  # an empty title means a year-header item; update the running year
                outc2.year_name.append(year)
                num_flag = 0
    if num_flag:  # the last year never hits the else branch, so flush it into outc2.year_num separately
        outc2.year_num.append(num_flag)


def output(bri, name):
    """Write the scraped article list to ``<name>_title-list.txt``.

    bri: the brief-intro line written at the top of the file.
    name: the target's name, used to build the output filename.
    Reads the module-level ``outc1`` store filled by information_get().
    """
    with open(name + '_title-list.txt', 'w', encoding='utf-8') as f:
        # f.write replaces the misused f.writelines on single strings
        # (writelines iterated the string char by char — same bytes, wrong call).
        f.write(bri)
        f.write('\n')
        f.write('The list of articles is below：' + '\n')
        for i, title in enumerate(outc1.title):
            # BUG FIX: str() guards the year — articles scraped before any
            # year header carry the integer sentinel -1, and concatenating
            # '\t' with an int raised TypeError.
            f.write(str(i + 1) + '. ' + title + '\t' + str(outc1.year[i]) + '\n')
            f.write('The download address:  ' + outc1.download_add[i])
            f.write('\n\n')


# 绘图函数
# Plotting function
def draw(name):
    """Show a bar chart of article counts per year for the given author.

    Reads the module-level ``outc2`` store filled by information_get().
    """
    years, counts = outc2.year_name, outc2.year_num
    plt.bar(years, counts, 0.5, color="#87CEFA")
    plt.xlabel('Year')
    # Rotate the year labels so long ranges stay readable.
    plt.xticks(rotation=90, fontsize=7)
    plt.ylabel('numbers')
    plt.title(name + ':  Number of articles statistics')
    plt.show()
