import pymongo
import configparser
import csv
import hashlib
import urllib
import os
import time
from urllib.parse import urlparse
from urllib import parse
from bs4 import BeautifulSoup
from ordered_set import OrderedSet
from aip import AipNlp


# Module-level setup: executes at import time.
# Reads config.ini from the current working directory; missing file or
# missing sections will raise KeyError on the lookups below.
config = configparser.ConfigParser()
config.read('config.ini')
conn_str = config['MongoDB']['connection_string']
mongo_client = pymongo.MongoClient(conn_str)  # pymongo connects lazily, on first operation

# Database handle used by upsert_to_mongo() (collection 'keywords').
database = config['MongoDB']['database']
db = mongo_client[database]

# Baidu AIP (NLP) credentials, consumed by lexer_words().
appid = config['Baidu.Aip']['APP_ID']
appsecret = config['Baidu.Aip']['API_KEY']
secretkey = config['Baidu.Aip']['SECRET_KEY']


# Pipeline helpers: read HTML content, run lexical analysis, upsert keywords to MongoDB.


def print_hi(name):
    # Use a breakpoint in the code line below to debug your script.
    print(f'{name} process completed. ')  # Press Ctrl+F8 to toggle the breakpoint.


def read_file_dir(path):
    """Recursively collect every ``content.html`` under *path*, strip its
    HTML markup, and return one whitespace-collapsed text string per file.

    Each returned string is the concatenation of the file's non-blank text
    lines, each prefixed with a single space (so strings start with ' ').

    :param path: root directory to scan.
    :return: list of plain-text strings, one per content.html found.
    """
    # One recursive walk is enough. The previous implementation re-walked
    # every first-level directory and appended ``path/dir/content.html``
    # whenever the file appeared ANYWHERE in that subtree — yielding a wrong
    # path when the file lived deeper, and duplicates (one per matching
    # subdirectory). Collecting the actual ``root`` fixes both.
    allhtmls = []
    for root, _dirs, files in os.walk(path):
        if 'content.html' in files:
            allhtmls.append(os.path.join(root, 'content.html'))

    alltexts = []
    for html_path in allhtmls:
        with open(html_path, encoding="utf-8", mode='r') as html_file:
            soup = BeautifulSoup(html_file.read(), 'html.parser')

        # Keep non-blank lines of the extracted text, joined with a
        # leading space each (preserves the original output format).
        pieces = [line.strip() for line in soup.text.strip().split('\n') if line.strip()]
        alltexts.append(''.join(' ' + piece for piece in pieces))

    return alltexts


def lexer_words(alltexts):
    """Run Baidu AIP custom lexical analysis over each text and return the
    per-text results.

    Fixes a defect where each API result was discarded and the function
    returned None, even though the caller assigns its return value.

    :param alltexts: iterable of plain-text strings to analyze.
    :return: list of lexerCustom result dicts, in input order.
    """
    lexer_client = AipNlp(appid, appsecret, secretkey)
    results = []
    for text in alltexts:
        results.append(lexer_client.lexerCustom(text))
        # Throttle requests to stay under the Baidu AIP QPS limit.
        time.sleep(0.2)
    return results


def upsert_to_mongo(alldict):
    """Upsert keyword records into the ``keywords`` collection.

    Each record's ``id`` key becomes Mongo's ``_id``. When a document with
    the same ``_id`` already exists, scalar ``fj_name`` is overwritten and
    the list fields (``synonyms``, ``cn_diagnosis``, ``cn_dialectic``,
    ``src``) are merged, new values first, preserving order and uniqueness.

    :param alldict: iterable of dicts, each with an ``id`` key.
    """

    def _merge(new_values, old_values):
        # BUG FIX: OrderedSet.union returns a NEW set; the previous code
        # discarded its return value, so existing DB values were silently
        # dropped instead of merged. New values keep priority in ordering.
        merged = OrderedSet(new_values)
        if old_values is not None:
            merged = merged.union(OrderedSet(old_values))
        return list(merged)

    for one in alldict:
        one['_id'] = one.pop('id')
        entity = db['keywords'].find_one({'_id': one['_id']})

        if entity is None:
            # No existing document: insert the record as-is.
            db['keywords'].update_one({'_id': one['_id']}, {'$set': one}, upsert=True)
            continue

        if one.get('fj_name') is not None:
            entity['fj_name'] = one['fj_name']

        # Merge the list-valued fields. Using entity.get(...) everywhere also
        # removes the KeyError risk the old direct entity['synonyms'] had.
        for field in ('synonyms', 'cn_diagnosis', 'cn_dialectic'):
            if one.get(field) is not None:
                entity[field] = _merge(one[field], entity.get(field))

        entity.pop('_id')  # never $set the immutable _id field
        if one.get('src') is not None:
            entity['src'] = _merge(one['src'], entity.get('src'))

        db['keywords'].update_one({'_id': one['_id']}, {'$set': entity})


if __name__ == '__main__':
    # Hard-coded input root; each subdirectory is expected to contain a
    # content.html file — TODO: move this path into config.ini.
    alltexts = read_file_dir('E:\\A-I\\task1')

    # Run Baidu lexical analysis over the extracted texts.
    all_lexer_words_dict = lexer_words(alltexts)
    # NOTE(review): the CSV readers below are referenced but not defined in
    # this file — confirm where read_file_type2/3/4 live before re-enabling.
    # alldict = read_file_type2('type2.csv')
    # upsert_to_mongo(alldict)
    # alldict = read_file_type3('type3.csv')
    # upsert_to_mongo(alldict)
    # alldict = read_file_type4('type4.csv')
    # upsert_to_mongo(alldict)

    print_hi('Keyword Upsert Lexer Importer ')
