"""
@Filename       : kg_builder.py
@Create Time    : 2021/09/16 22:32
@Author         : Rylynn
@Description    : 

"""
import ast
import json
import pickle

import networkx as nx
import requests
import tagme
from tqdm import tqdm
from wikidata.client import Client
from wikidata.entity import Entity

from util.preprocess import load_vocab_dict



class KgBuilder():
    """Builds small knowledge graphs by linking free text to Wikidata.

    Pipeline: TagMe annotates entity mentions in the text, each mention is
    resolved to a Wikidata QID via the ``wbsearchentities`` API, and the
    entity's entity-valued claims become edges of a ``networkx`` graph.
    """

    def __init__(self):
        super(KgBuilder, self).__init__()
        self.wikidata_client = Client()
        # NOTE(review): hard-coded API token — consider moving to env/config.
        tagme.GCUBE_TOKEN = "41a30b59-c285-4303-8522-61c9760b4cae-843339462"
        self.tagme = tagme

    def build_kg_from_text(self, content_id_list, content_dict):
        """Build one undirected KG from the texts of the given content ids.

        Args:
            content_id_list: iterable of content ids to process.
            content_dict: mapping from content id to raw text.

        Returns:
            nx.Graph whose nodes are Wikidata QIDs and whose edges carry
            the linking Wikidata property id in edge attribute ``r``.
        """
        kg = nx.Graph()
        for cid in content_id_list:
            # Skip ids with missing or empty text.
            if not content_dict.get(cid):
                continue
            candidate_entities = tagme.annotate(content_dict[cid])
            if candidate_entities is None:
                # TagMe returns None on API failure; skip this item.
                continue
            for annotation in candidate_entities.annotations:
                # Drop low-confidence entity links.
                if annotation.score <= 0.2:
                    continue
                match = self.search_kg_qid(annotation.entity_title)
                if match is None:
                    continue
                qid = match['id']
                subject = self.wikidata_client.get(qid)
                if not kg.has_node(qid):
                    kg.add_node(qid)
                try:
                    # lists() yields (property, [values]); iterate the value
                    # list — comparing the list itself against Entity (as the
                    # old code did) never matched, so no edges were created.
                    for predicate, values in subject.lists():
                        for value in values:
                            if isinstance(value, Entity):
                                if not kg.has_edge(subject.id, value.id):
                                    kg.add_edge(subject.id, value.id,
                                                r=predicate.id)
                except Exception as e:
                    # Best effort: one malformed entity must not abort the
                    # whole build; log and move on.
                    print(e)
        return kg

    def search_kg_qid(self, entity_name):
        """Search Wikidata for *entity_name*.

        Returns:
            The best-matching search-result dict (contains key ``'id'``),
            or None when there is no match.
        """
        api_url = "https://www.wikidata.org/w/api.php"
        params = {"action": "wbsearchentities",
                  "search": entity_name,
                  "language": "en",
                  "limit": 5,
                  "format": "json"}

        # Timeout so one stuck request cannot hang the whole extraction.
        response = requests.get(api_url, params=params, timeout=30)
        entity_list = response.json().get('search', [])
        if not entity_list:
            return None
        return entity_list[0]

    def search_kg_links(self, entity_id):
        """Return the (property, values) claim lists of a Wikidata entity."""
        entity = self.wikidata_client.get(entity_id)
        return entity.lists()


def content_kg_extraction(dataset):
    """Extract Wikidata triples for every content item of *dataset*.

    Resumes from a previously saved ``content.json`` checkpoint and reuses
    triples already computed for the legacy ``dblp`` dataset.  The checkpoint
    file is rewritten after each newly processed item so an interrupted run
    can be resumed.

    Args:
        dataset: dataset directory name under ``../../../../data``.

    Returns:
        dict mapping content id -> list of (qid, property_id, qid) triples.
    """
    checkpoint_path = '../../../../data/{}/content.json'.format(dataset)
    try:
        with open(checkpoint_path, encoding='utf8') as f:
            content_kg_dict = json.load(f)
    except (OSError, json.JSONDecodeError):
        # No (or corrupt) checkpoint yet — start from scratch.
        content_kg_dict = {}
    # Triples computed for the original dblp run are reused where possible.
    with open('../../../../data/dblp/content.json', encoding='utf8') as f:
        old_content_kg_dict = json.load(f)
    kg_builder = KgBuilder()

    content_dict = {}
    with open('../../../../data/{}/content.txt'.format(dataset),
              encoding='utf8') as f:
        for line in f:
            content_id, content = line.split('\t')
            content_dict[content_id] = content

    existing_number = len(content_kg_dict)
    number = 1
    for cid, content in content_dict.items():
        if cid in old_content_kg_dict:
            content_kg_dict[cid] = old_content_kg_dict[cid]
            continue
        # Skip items already covered by the checkpoint (resume support).
        if number <= existing_number:
            number += 1
            continue
        # Skip ids with missing or empty text.
        if not content_dict.get(cid):
            continue
        triple_list = []

        candidate_entities = tagme.annotate(content_dict[cid])
        print(number, content_dict[cid])
        number += 1
        if candidate_entities is None:
            # TagMe returns None on API failure; skip this item.
            continue
        for annotation in candidate_entities.annotations:
            # Drop low-confidence entity links.
            if annotation.score <= 0.2:
                continue
            match = kg_builder.search_kg_qid(annotation.entity_title)
            if match is None:
                continue
            qid = match['id']
            subject = kg_builder.wikidata_client.get(qid)

            for predicate, values in subject.iterlists():
                for value in values:
                    # Only entity-valued claims become triples.
                    if isinstance(value, Entity):
                        triple_list.append((qid, predicate.id, value.id))

        content_kg_dict[cid] = triple_list
        # Persist after every item so progress survives a crash.
        with open(checkpoint_path, 'w') as f:
            json.dump(content_kg_dict, f)
    return content_kg_dict


def run_kg_extraction(dataset):
    """Build a per-user interest knowledge graph and pickle the result.

    For every user appearing in ``cascade.txt``, collects the content ids
    that user interacted with, builds a KG from the corresponding texts via
    :meth:`KgBuilder.build_kg_from_text`, and dumps the mapping
    user -> nx.Graph to ``uikg.pkl``.

    Args:
        dataset: dataset directory name under ``../../../../data``.
    """
    vocab_dict = load_vocab_dict('../../../../data', dataset)

    kg_builder = KgBuilder()
    content_dict = {}
    with open('../../../../data/{}/content.txt'.format(dataset),
              encoding='utf8') as f:
        for line in f:
            content_id, content = line.split('\t')
            content_dict[content_id] = content

    user_content = {}
    with open('../../../../data/{}/cascade.txt'.format(dataset),
              encoding='utf8') as f:
        for line in f:
            fields = line.strip().split()
            if not fields:
                continue
            content_id, chunks = fields[0], fields[1:]
            for chunk in chunks:
                # Each chunk is "user,timestamp"; only the user is needed.
                user, _timestamp = chunk.split(',')
                # literal_eval safely parses the numeric token — the old
                # eval() would execute arbitrary expressions from the file.
                user = ast.literal_eval(user)
                user_content.setdefault(vocab_dict[user], []).append(content_id)

    uikg_dict = {}
    for user, content_list in user_content.items():
        uikg_dict[user] = kg_builder.build_kg_from_text(content_list,
                                                        content_dict)

    with open('../../../../data/{}/uikg.pkl'.format(dataset), 'wb') as f:
        pickle.dump(uikg_dict, f)


def tagme_test(text):
    """Annotate *text* with TagMe and print each entity's id and title."""
    tagme.GCUBE_TOKEN = "41a30b59-c285-4303-8522-61c9760b4cae-843339462"
    annotated = tagme.annotate(text)
    for ann in annotated.annotations:
        print(ann.entity_id)
        print(ann.entity_title)


if __name__ == '__main__':
    # run_kg_extraction("memetracker")
    # Avoid shadowing the builtin `dict`; close the pickle file deterministically.
    dataset_name = 'dblp_new'
    content_kg = content_kg_extraction(dataset_name)
    with open('../../../../data/{}/content.pkl'.format(dataset_name), 'wb') as f:
        pickle.dump(content_kg, f)

    # tagme_test("Q10: The Effect of Temperature on Ion Channel Kinetics")
