import re
import time
import json
import requests
import re
from lxml import etree
from bs4 import BeautifulSoup
import os
# Running counters and caches shared (via `global`) by all functions below.
line_num = 0              # 1-based index of the input line currently processed
article_num = 0           # number of "-DOCSTART-" articles completed so far
no_disam_mention_num=0    # mentions for which no disambiguation candidate was found
entity_list = dict()      # cache: entity title -> entity dict (title/wiki_url/summary/pv)
mention_list = dict()     # cache: mention text -> list of candidate entity dicts

def get_disam_page(url,f):
    """Fetch a Wikipedia disambiguation page and return the linked titles.

    url: full URL of a "<name>_(disambiguation)" page.
    f:   open log file; progress and errors are written to it.

    Returns a list of article titles (URL path components, underscores
    kept).  Links that are themselves disambiguation pages are skipped and
    any '#section' fragment is stripped.  Returns [] for non-Wikipedia URLs
    or on any network/parse error (errors are logged, never raised).
    """
    global  line_num
    global article_num

    f.write("disam_url:  "+url+"\n")
    # Guard against relative or foreign URLs produced upstream.
    if not url.startswith('https://en.wikipedia.org/wiki/'):
        return []
    try:
        # BUGFIX: the request used to sit outside this try, so connection
        # failures escaped the "connect error" handler below.
        response = requests.get(url=url)
        response.encoding = 'utf8'
        html = etree.HTML(response.text)
        # NOTE(review): positional xpath is brittle against Wikipedia layout
        # changes — confirm it still matches the disambiguation list items.
        html_data = html.xpath('/html/body/div[3]/div[3]/div[4]/div/ul/li/a/@href')
        result_list = []
        for href in html_data:
            # Only internal article links are of interest.
            if not href.startswith('/wiki/'):
                continue
            # BUGFIX: the local result used to be named `re`, shadowing the
            # imported `re` module inside this function.
            title = href[href.rindex("/") + 1:]
            # Nested disambiguation pages are not concrete entities.
            if title.endswith("_(disambiguation)"):
                continue
            # Drop a '#section' fragment so the title maps to a page.
            if title.find('#') > 0:
                title = title[:title.find('#')]
            result_list.append(title)
        return result_list
    except BaseException as e:
        print(e)
        f.write("!!! connect error occur in article: " + str(article_num) + " at line: " + str(line_num)+"  at url  "+url+"\n"+"\n")
        print("!!! connect error occur in article: " + str(article_num) + " at line: " + str(line_num)+"  at url  "+url+"\n")
        # raise e
        return []

def get_disam_entity(word,article_entities,major_name,f_log):
    """Append candidate entities from *word*'s disambiguation page.

    word:             mention text to disambiguate.
    article_entities: list of entity dicts; candidates are appended in place.
    major_name:       title of the gold entity, skipped among candidates.
    f_log:            open log file.

    Each appended dict carries title/wiki_url/summary/pv plus flag=0
    (candidate; the gold entity gets flag=1 from the caller).  Previously
    crawled entities are reused from the global entity_list cache.  The list
    is capped at 31 entries.  Errors are logged and swallowed; the (possibly
    extended) list is also returned for convenience.
    """
    global entity_list
    try:
        disam_entity_names = get_disam_page(make_disam_url(word), f_log)
        for et_name in disam_entity_names:
            # The gold entity is already in the list (with flag=1).
            if(et_name==major_name):
                continue
            if (et_name in entity_list):
                # Cache hit: copy so the per-article flag does not leak into
                # the shared cache entry.
                entity_disam = entity_list[et_name].copy()
                entity_disam["flag"] = 0
                article_entities.append(entity_disam)
            else:
                entity_disam = dict()
                entity_disam["title"] = et_name
                entity_disam["wiki_url"] = make_entity_url(entity_disam["title"])
                entity_disam["summary"] = get_entity_summary(entity_disam["wiki_url"], f_log)
                entity_disam["pv"] = get_entity_pv(make_pv_url(entity_disam["title"]), f_log)
                # Cache a copy that does not carry the article-specific flag.
                entity_list[entity_disam["title"]] = entity_disam.copy()
                entity_disam["flag"] = 0
                article_entities.append(entity_disam)
            # BUGFIX: an unconditional `break` here stopped the loop after the
            # first candidate and made this cap check unreachable dead code.
            if(len(article_entities)>=31):
                break
    except BaseException as e:
        print(e)
        f_log.write("!!! connect error occur in article: " + str(article_num) + " at line: " + str(line_num)+"  when get_disam_entity  "+word+"\n")
        print("!!! connect error occur in article: " + str(article_num) + " at line: " + str(line_num)+"  when get_disam_entity  "+word+"\n")
        # raise e

    return article_entities

def get_entity_summary(url,f):
    """Return the opening paragraphs of a Wikipedia article as plain text.

    url: full article URL.
    f:   open log file; progress and errors are written to it.

    Paragraph texts are concatenated (newline-separated) until the
    accumulated text reaches roughly 100 space-separated tokens.  Returns
    "" for non-Wikipedia URLs or on any network/parse error (errors are
    logged, never raised).
    """
    global line_num
    global article_num
    f.write("entity_url:  " + url+"\n")
    if not url.startswith('https://en.wikipedia.org/wiki/'):
        # BUGFIX: used to `return []` here although every other path (and
        # every caller) treats the result as a string.
        return ""
    summary=""
    try:
        # BUGFIX: the request used to sit outside this try, so connection
        # failures escaped the "connect error" handler below.
        response = requests.get(url=url)
        response.encoding = 'utf8'
        html = etree.HTML(response.text)
        # NOTE(review): positional xpath is brittle against Wikipedia layout
        # changes — confirm it still matches the article body paragraphs.
        nodes = html.xpath('/html/body/div[3]/div[3]/div[4]/div/p')
        for node in nodes:
            data = node.xpath("string(.)")
            summary += str(data).strip()+"\n"
            # Stop once roughly 100 space-separated tokens are collected.
            if len(summary.split(' ')) >= 100:
                break
        f.write("summary:  "+summary+"\n")
        return summary
    except BaseException as e:
        print(e)
        f.write("!!! connect error occur in article: " + str(article_num) + " at line: " + str(line_num)+"  at url  "+url+"\n")
        print("!!! connect error occur in article: " + str(article_num) + " at line: " + str(line_num)+"  at url  "+url+"\n")
        # raise e
        return ""

def get_entity_pv(url,f):
    """Return the total page views for an article from the Wikimedia API.

    url: a Wikimedia REST pageviews per-article URL (see make_pv_url).
    f:   open log file; progress and errors are written to it.

    Sums the "views" field over all items of the JSON response.  Returns 0
    on any network/parse error (errors are logged, never raised).
    """
    global line_num
    global article_num
    f.write("pv_url:  " + url+"\n")
    view_num=0
    try:
        # BUGFIX: the request used to sit outside this try, so connection
        # failures escaped the "connect error" handler below.
        response = requests.get(url=url)
        response.encoding = 'utf8'
        data=json.loads(response.text)["items"]
        for item in data:
            view_num+=int(item["views"])
        f.write("view_num:        "+str(view_num)+"\n")
    except BaseException as e:
        print(e)
        f.write("!!! connect error occur in article: " + str(article_num) + " at line: " + str(
            line_num) + "  at url  " + url + "\n")
        print("!!! connect error occur in article: " + str(article_num) + " at line: " + str(
            line_num) + "  at url  " + url + "\n")
        # raise e
        return 0
    return view_num

def make_entity_url(entity_name):
    """Build the English-Wikipedia article URL for *entity_name*."""
    base = "https://en.wikipedia.org/wiki/"
    return base + entity_name

def make_disam_url(mention_name):
    """Build the "(disambiguation)" page URL for a mention.

    Spaces are converted to underscores to match Wikipedia path style.
    """
    slug = "_".join(mention_name.split(" "))
    return f"https://en.wikipedia.org/wiki/{slug}_(disambiguation)"

def make_pv_url(entity_name):
    """Build the Wikimedia REST pageviews per-article URL for *entity_name*.

    Requests monthly view counts from 2015-07-01 (the earliest date the
    pageviews API has data for) through 2019-08-09.
    """
    # BUGFIX: the start timestamp read "1015070100" (year 1015).  The
    # pageviews API only serves data from 2015-07-01 onward, so the intended
    # value is 2015070100.
    return "https://wikimedia.org/api/rest_v1/metrics/pageviews/per-article/en.wikipedia/all-access/user/" + \
          entity_name + \
          "/monthly/2015070100/2019080900"

def _dump_article(artical_info, artical_buff, f_log, f_output):
    """Write one finished article: buffered input lines and the JSON result
    to the output file, progress counters to the log and stdout."""
    global article_num
    article_num += 1
    f_output.write("\n".join(artical_buff) + "\n")
    print("article_num:   " + str(article_num))
    print("no_disam_mention_num: " + str(no_disam_mention_num))
    print("*************************************************************")
    f_log.write("article_num:   " + str(article_num) + "\n")
    f_log.write("*************************************************************" + "\n")
    f_log.write("no_disam_mention_num: " + str(no_disam_mention_num) + "\n")
    json_re = json.dumps(artical_info)
    f_log.write(json_re + "\n")
    f_output.write("__json_result__:" + json_re + "\n")
    f_output.flush()

def main():
    """Crawl entity candidates for every annotation file under *root*.

    Each data file is tab-separated, AIDA-style: articles are delimited by
    "-DOCSTART-" lines and annotated lines have 7 fields with a Wikipedia
    URL in field 4.  For every mention the gold entity (flag=1) plus up to
    30 disambiguation candidates (flag=0) are collected.  Two files are
    written per input: "<name>_log" (progress/errors) and "<name>_output"
    (original lines plus one "__json_result__:" JSON line per article).
    The global entity_list / mention_list caches avoid refetching.
    """
    global line_num
    global article_num
    global entity_list
    global no_disam_mention_num

    # root = "/root/zdj/data/"
    root = "D:/python/spider/Wikipedia/data/"
    for name in os.listdir(root):
        if not os.path.isfile(os.path.join(root, name)):
            continue
        file_name = os.path.join(root, name)
        print("file_name: " + file_name)
        log_name = file_name + "_log"
        print("log_name:   " + log_name)
        output_name = file_name + "_output"
        print("output_name:   " + output_name)

        # BUGFIX: the three handles were opened but never closed; `with`
        # guarantees they are flushed and released even if an error escapes.
        with open(file_name, 'rb') as f, \
                open(log_name, 'w', encoding="utf8") as f_log, \
                open(output_name, 'w', encoding="utf8") as f_output:
            text = str(f.read().decode('utf-8')).split('\n')
            artical_info = None
            artical_buff = None
            last_mention = ""
            mention_num = 0
            for line in text:
                try:
                    line_num += 1
                    words = line.split('\t')
                    if line.startswith("-DOCSTART-"):
                        # Flush the previous article before starting a new one.
                        if artical_info is not None:
                            _dump_article(artical_info, artical_buff, f_log, f_output)
                        artical_info = dict()
                        artical_buff = list()
                        mention_num = 0
                    elif (len(words) == 7
                          and (words[4].startswith('http://en.wikipedia.org/wiki/')
                               or words[4].startswith('en.wikipedia.org/wiki/'))):
                        # Normalize the annotation URL to https.
                        # BUGFIX: "s" used to be spliced in at index 4 even for
                        # scheme-less URLs, yielding "en.wsikipedia...".
                        if words[4].startswith('http://'):
                            words[4] = "https" + words[4][4:]
                        else:
                            words[4] = "https://" + words[4]
                        if words[2] == last_mention:
                            # Continuation token of the mention just handled.
                            continue
                        elif words[2] in mention_list:
                            # Mention seen before: reuse its candidate list.
                            last_mention = words[2]
                            artical_info[words[2]] = mention_list[words[2]]
                        else:
                            print("line " + str(line_num) + ":    " + line)
                            f_log.write("line " + str(line_num) + ":    " + line + "\n")
                            last_mention = words[2]
                            mention_num += 1
                            article_entities = list()
                            # Gold entity first, marked flag=1.
                            # NOTE(review): the cached dict is mutated/shared
                            # here (no .copy() on the cache hit, unlike
                            # get_disam_entity) — confirm this is intended.
                            if words[3] in entity_list:
                                entity_major = entity_list[words[3]]
                                entity_major["flag"] = 1
                                article_entities.append(entity_major)
                            else:
                                entity_major = dict()
                                entity_major["title"] = words[3]
                                entity_major["wiki_url"] = make_entity_url(entity_major["title"])
                                entity_major["summary"] = get_entity_summary(entity_major["wiki_url"], f_log)
                                entity_major["pv"] = get_entity_pv(make_pv_url(entity_major["title"]), f_log)
                                entity_list[entity_major["title"]] = entity_major.copy()
                                entity_major["flag"] = 1
                                article_entities.append(entity_major)

                            # Candidate search, in order: the mention's own
                            # disambiguation page, then the entity title's,
                            # then per-word / lowercase fallbacks.
                            get_disam_entity(last_mention, article_entities, entity_major["title"], f_log)
                            if len(article_entities) == 1 and entity_major["title"] != last_mention:
                                # entity-title disambiguation page
                                get_disam_entity(entity_major["title"], article_entities, entity_major["title"], f_log)
                            if len(article_entities) == 1:
                                mention_split = last_mention.split(' ')
                                if len(mention_split) > 1:
                                    # disambiguation page of each mention word
                                    for mention_item in mention_split:
                                        get_disam_entity(mention_item, article_entities, entity_major["title"], f_log)
                                        if len(article_entities) > 1:
                                            break
                                else:
                                    # lowercase-mention disambiguation page
                                    get_disam_entity(mention_split[0].lower(), article_entities, entity_major["title"], f_log)
                            if len(article_entities) == 1:
                                no_disam_mention_num += 1
                            artical_info[last_mention] = article_entities
                            mention_list[last_mention] = article_entities
                            artical_info["mention_num"] = mention_num
                    artical_buff.append(line)
                except BaseException as e:
                    print(e)
                    f_log.write("!!! error occur in article: " + str(article_num) + " at line: " + str(line_num) + "\n")
                    # raise e
            # BUGFIX: the last article of each file used to be silently
            # dropped because it was only flushed on the next "-DOCSTART-".
            if artical_info is not None:
                _dump_article(artical_info, artical_buff, f_log, f_output)
# Script entry point: run the crawler only when executed directly.
if __name__ == '__main__':
    main()
