import json
import zipfile
from itertools import chain

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import requests
import seaborn as sns
from lxml import etree
from tqdm import tqdm

# Extract the dataset archive if needed (the zip sits next to this script).
# Context manager guarantees the archive is closed even on error;
# extractall replaces the manual per-member extract loop.
with zipfile.ZipFile("./arxiv-metadata-oai-2019.json.zip", 'r') as zf:
    zf.extractall("./")  # extraction destination

# Load the line-delimited JSON metadata (one paper per line) into a DataFrame.
with open('arxiv-metadata-oai-2019.json', 'r', encoding='utf-8') as f:
    # comprehension instead of append-loop; explicit encoding for portability
    records = [json.loads(line) for line in f]
data = pd.DataFrame(records)
data.head()

# Fetch the arXiv taxonomy page and extract all category names.
req = requests.get('https://arxiv.org/category_taxonomy')
html = etree.HTML(req.text.encode('utf-8'))
# XPath taken from the taxonomy page's DOM structure
categories_arxiv = html.xpath('//*[@id="category_taxonomy_list"]/div[1]/div/div/div/div[1]/h4/text()')

# Scraped names may carry surrounding spaces; strip each one.
categories_arxiv = [name.strip(' ') for name in categories_arxiv]
# Per-category paper counter for 2019, all initialised to zero.
categories_cnt_dic = dict.fromkeys(categories_arxiv, 0)

# Count, per category, how many papers were last updated in 2019.
# NOTE(review): the original reused loop variable `i` for three nested
# loops; renamed here to remove the shadowing. Membership now tests the
# dict (O(1)) instead of the category list (O(n)) — same keys, same result.
for row in tqdm(range(data.shape[0])):
    update_date = str(data.iloc[row]['update_date'])
    if '2019' in update_date:
        # 'categories' is a space-separated string of category labels
        for cat in str(data.iloc[row]['categories']).split(' '):
            cat = cat.strip(' ')
            if cat in categories_cnt_dic:
                categories_cnt_dic[cat] += 1

# Visualise the 2019 per-category counts as a pie chart.
categories_2019 = pd.Series(categories_cnt_dic)
plt.figure(figsize=(48, 16))
plt.pie(
    categories_2019,
    labels=categories_arxiv,
    autopct='%1.2f%%',
    startangle=160,
)
plt.tight_layout()  # keep labels from overlapping as much as possible
plt.show()

# The pie chart is too crowded; redraw the counts as bar charts on a 2x2 grid.
fig, axes = plt.subplots(2, 2, sharex=False, sharey=False, dpi=300, figsize=(24, 8))
chunk = 10                                     # categories per subplot
n_chunks = categories_2019.shape[0] // chunk   # number of subplots to fill
for idx in range(n_chunks):
    start, stop = idx * chunk, (idx + 1) * chunk
    target_ax = axes[idx // 2][idx % 2]
    categories_2019[start:stop].plot.bar(ax=target_ax, color='g', alpha=0.7)
plt.show()

# Extended analysis: which category has the most published papers overall.
categories_arxiv = html.xpath('//*[@id="category_taxonomy_list"]/div/div/div/div/div[1]/h4/text()')
# Scraped names may carry spaces — strip each one.
categories_arxiv = pd.Series(categories_arxiv).apply(lambda name: name.strip(' '))

# Frame indexed by category, holding the paper count per category.
categoris_data = pd.DataFrame(np.zeros(categories_arxiv.shape[0]), index=categories_arxiv)

# Count the papers whose 'categories' string mentions each category.
for cat in tqdm(categoris_data.index):
    categoris_data.loc[cat] = data[data['categories'].apply(lambda x: cat in x)].shape[0]

categoris_data  # inspect the counts

# Top ten categories by paper count, descending.
sort_categories = categoris_data.sort_values(by=0, ascending=False)[:10]
sort_categories  # inspect the top ten

sort_categories.plot.bar(color='g', alpha=0.7)  # visualise
plt.show()


# Task 2: author top-10. Restricted to cs.CV — running over the whole
# dataset is too expensive; other categories can be swapped in here.
data2 = data[data['categories'].apply(lambda x: 'cs.CV' in x)]
# Flatten the per-paper author lists. chain.from_iterable is linear,
# unlike the quadratic sum(list_of_lists, []) idiom it replaces.
all_authors = list(chain.from_iterable(data2['authors_parsed']))
authors = [' '.join(x) for x in all_authors]

authors_data = pd.DataFrame(authors, index=np.arange(len(authors)), columns=['authors'])
authors_count = authors_data['authors'].value_counts()  # papers per author

# Per-author frame: article count plus a comments counter to be filled in.
authors_data = pd.DataFrame(
    np.zeros((authors_count.shape[0], 2)),
    index=authors_count.index,
    columns=['articles', 'comments'],
)
authors_data['articles'] = authors_count.values

# For each paper, credit one "commented paper" to each of its authors.
# Fixes from review: the original ran eval() on authors_parsed, which is
# already a list of lists (eval is unnecessary and unsafe on data), reused
# loop variable `i` for two nested loops, and treated a missing comments
# field (NaN) as truthy — pd.notna now guards against that.
for row in tqdm(range(data2.shape[0])):
    comment_ = data2.iloc[row]['comments']
    # normalise each parsed author (list of name parts) to one string
    names = [' '.join(parts) for parts in data2.iloc[row]['authors_parsed']]
    if pd.notna(comment_) and comment_:
        for name in names:
            authors_data.loc[name, 'comments'] += 1

authors_data.head()

authors_data['percent'] = authors_data['comments']/authors_data['articles']#comment rate = commented papers / articles per author

authors_data.head()

authors_data.describe()

authors_data.sort_values(by='percent')#the raw rate alone turns out not to be very meaningful

authors_data_ser = authors_data[authors_data['percent'].apply(lambda x:x!=1.0 and x!=0.0)]#drop degenerate rates (exactly 0 or 1, mostly single-paper authors)

authors_data_ser.sort_values(by='percent',ascending = False).head(10)#top 10 by comment rate, descending

authors_data.sort_values(by = 'articles',ascending=False).head(10)#most prolific first — likely an institution or group account

authors_data.sort_values(by = 'comments',ascending=False).head(10)#then the most-commented authors


# Tianchi reference example: most frequent authors within cs.CV.
# Filter to papers whose category string mentions cs.CV.
data2 = data[data['categories'].apply(lambda entry: 'cs.CV' in entry)]

# Flatten every paper's parsed-author list into one list.
all_authors = sum(data2['authors_parsed'], [])

# Join each author's name parts into a single display string.
authors_names = pd.DataFrame([' '.join(parts) for parts in all_authors])

# Horizontal bar chart of the ten most frequent author names.
plt.figure(figsize=(10, 6))
name_counts = authors_names[0].value_counts()
name_counts.head(10).plot(kind='barh')

# Label the y ticks with the author names.
names = name_counts.index.values[:10]
_ = plt.yticks(range(0, len(names)), names)
plt.ylabel('Author')
plt.xlabel('Count')

# Surname statistics (first element of each parsed author name).
surnames = [author[0] for author in all_authors]
authors_lastnames = pd.DataFrame(surnames)

plt.figure(figsize=(10, 6))
surname_counts = authors_lastnames[0].value_counts()
surname_counts.head(10).plot(kind='barh')

names = surname_counts.index.values[:10]
_ = plt.yticks(range(0, len(names)), names)
plt.ylabel('Author')
plt.xlabel('Count')

# Surname statistics.
# NOTE(review): exact duplicate of the previous chart; kept as in the notebook.
authors_lastnames = pd.DataFrame([x[0] for x in all_authors])

plt.figure(figsize=(10, 6))
lastname_counts = authors_lastnames[0].value_counts()
lastname_counts.head(10).plot(kind='barh')

names = lastname_counts.index.values[:10]
_ = plt.yticks(range(0, len(names)), names)
plt.ylabel('Author')
plt.xlabel('Count')

# Distribution of the first character of each surname.
initials = [author[0][0] for author in all_authors]
authors_lastnames = pd.DataFrame(initials)

plt.figure(figsize=(10, 6))
initial_counts = authors_lastnames[0].value_counts()
initial_counts.head(10).plot(kind='barh')

names = initial_counts.index.values[:10]
_ = plt.yticks(range(0, len(names)), names)
plt.ylabel('Author')
plt.xlabel('Count')

