import csv
import os
import random
import string
import threading
from time import sleep
from typing import cast

import fasttext
import requests
from bs4 import BeautifulSoup, NavigableString

from fastText模块.fastTextTest import m_predict
from util import compress_and_convert_to_base64

# Load the pre-trained fastText model; silence fastText's stderr warnings
# by replacing its print hook with a no-op.
fasttext.FastText.eprint = lambda x: None
model_path = '../aiModel/autotune.bin'
model = fasttext.load_model(model_path)
# Base URL of Baidu Baike; relative hrefs from crawled pages are joined onto it.
domain = 'https://baike.baidu.com/'
# searched_txt_path = './data/searchedWord.txt'

# Guards the shared work lists and the output CSV across crawler threads.
lock = threading.Lock()
# CSV file that accepted (classifier-positive) entries are appended to.
filePath = "./data/data.csv"

# Running count of entries written to the CSV in this session.
get_count = 0


def get_uuid():
    """Return a 16-character random alphanumeric identifier.

    NOTE(review): despite the name this is not an RFC-4122 UUID, just a
    random token drawn with the non-cryptographic `random` module.
    """
    alphabet = string.ascii_letters + string.digits
    return ''.join(random.choices(alphabet, k=16))


def get_content_str(con):
    """Recursively flatten a BeautifulSoup node into a marked-up text string.

    Tables are serialized with <tr>/<td>/<th> pseudo-tags, images are
    inlined as base64 wrapped in ``||<img> ... </img>||``, and elements
    carrying the ``MARK_MODULE`` class are prefixed with ``||`` as a
    section separator.  Returns "" when ``con`` is None.
    """
    if con is None:
        return ""
    # Collect fragments in a list and join once — the original built the
    # string with repeated += which is quadratic on large pages.
    parts = []

    for child in con.children:
        if child.name == 'table':
            # Serialize each row/cell, preserving the tag names.
            for row in child.find_all('tr'):
                col_str = ""
                for elem in row.find_all(['td', 'th']):
                    col_str += f"<{elem.name}>" + elem.get_text() + f"</{elem.name}>"
                parts.append(f"<{row.name}>" + col_str + f"</{row.name}>")
        elif child.name == 'img':
            # Inline the image as compressed base64 so the content is
            # self-contained in the CSV.
            b = compress_and_convert_to_base64(child['src'])
            parts.append("||<img> %s </img>||" % b)
        elif type(child) is NavigableString:
            # Exact-type check on purpose: `isinstance` would also match
            # subclasses such as Comment/CData, changing the output.
            parts.append(str(child))
        else:
            if child.has_attr('class') and 'MARK_MODULE' in child['class']:
                parts.append("||")
            parts.append(get_content_str(child))

    # NOTE(review): str.strip() removes a *character set*, not a substring —
    # this trims any of the characters in '查看我的收藏0有用+10' (and '|')
    # from both ends, which looks intended as suffix removal.  Kept as-is to
    # preserve behavior — confirm the intent before changing.
    return ''.join(parts).strip('查看我的收藏0有用+10').strip("||")


def query(to_search_data_2):
    """Fetch one Baidu Baike page; extract its content and outbound links.

    Parameters:
        to_search_data_2: dict with keys "word" (the search term),
            "url" (absolute page URL) and "data" (extra request payload).

    Returns:
        dict: ``{"result": {"uuid", "title", "content"},
        "toSearched": [candidate dicts shaped like the input]}``.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'
    }

    # NOTE(review): a GET with a body is unusual — the server most likely
    # ignores `data`; kept for compatibility.  A timeout is added so a
    # stalled request cannot hang a crawler thread forever, and the
    # response is closed in `finally` (the original leaked it on error).
    resp = requests.get(to_search_data_2['url'], headers=headers,
                        data=to_search_data_2['data'], timeout=30)
    try:
        resp.encoding = "utf-8"
        page = BeautifulSoup(resp.content, 'html.parser')
    finally:
        resp.close()

    if page.title is None or page.title.string is None:
        title_str = "未知"
    else:
        # Remove the site suffix explicitly.  The original used
        # str.strip("_百度百科"), which strips a character *set* and could
        # eat legitimate leading/trailing characters of the title.
        title_str = page.title.string
        if title_str.endswith("_百度百科"):
            title_str = title_str[:-len("_百度百科")]

    main_content = page.find("div", attrs={"class": "contentTab_IDE4J"})
    # Half-width commas are normalized to full-width so the CSV column
    # is not split by the content.
    r_str_ = get_content_str(main_content).replace(",", "，")
    r = {"uuid": get_uuid(), "title": title_str, "content": r_str_}

    # Collect in-page lemma links as candidates for the next crawl round.
    to_search_href_ls_ = []
    a_ls = main_content.find_all("a") if main_content is not None else []
    suffix = '?fromModule=lemma_inlink'
    for a_ in a_ls:
        href_ = a_.get("href")
        if href_ is None:
            continue
        # Drop the tracking query string.  The original used str.strip(),
        # which removes a character set and could corrupt hrefs whose edges
        # contain any of those characters.
        if href_.endswith(suffix):
            href_ = href_[:-len(suffix)]
        to_search_href_ls_.append({
            "word": a_.get_text(),
            "url": domain + href_,
            "data": {"fromModule": "lemma_inlink-box"},
        })

    return {"result": r, "toSearched": to_search_href_ls_}


def single_thread(to_search_data_2):
    """Crawl one entry under the global lock: query the page, update the
    shared work lists, and append classifier-positive results to the CSV.

    NOTE(review): the lock is held across the entire network request, so
    the worker "threads" effectively run one at a time — confirm whether
    this serialization is intentional before narrowing the critical
    section.
    """
    global get_count

    with lock:
        result = query(to_search_data_2)
        r_content = result['result']['content']

        # This entry is done; queue the newly discovered links.
        to_search_data_ls.remove(to_search_data_2)
        to_search_data_ls.extend(result['toSearched'])

        # Only pages the fastText classifier marks as military-themed
        # are written out.
        if m_predict(r_content):
            print("\n已收录：", result['result']['title'])
            fieldnames = ["uuid", "title", "content"]
            # Append when the CSV already exists, otherwise create it.
            mode = "a" if os.path.exists(filePath) else "w"
            # `with` guarantees the handle is closed even when writing
            # raises — the original leaked the file object on exceptions.
            with open(filePath, mode=mode, encoding='utf-8', newline='') as f_csv:
                csv.DictWriter(f_csv, fieldnames).writerow(result['result'])
            get_count += 1

        print("\r", "已查：", len(searched_word_ls), " 待查：", len(to_search_data_ls), "已收录：", get_count, end="")


if __name__ == '__main__':

    # Restore the already-searched terms persisted by a previous run.
    searched_word_ls = []
    if os.path.exists("./data/searchedWord.txt"):
        with open("./data/searchedWord.txt", 'r', encoding="utf-8") as f:
            searched_word_ls.extend(f.read().split(" "))
        print("初始化已搜词条列表：", len(searched_word_ls))

    to_search_data_ls = []
    to_search_word = input('查询词语：')
    # Seed the work queue with the user's first search term.
    to_search_data_ls.append({
        "word": to_search_word,
        "url": 'https://baike.baidu.com/item/%s' % to_search_word,
        "data": {"fromModule": "lemma_search-box"},
    })

    while len(to_search_data_ls) != 0:
        threads = []
        # Take up to 100 candidates per round.  Iterate over a snapshot
        # slice: the original indexed the live list with `for i in
        # range(100)` while calling remove() on it, which shifted the
        # indices and skipped an entry after every removal.
        for candidate in to_search_data_ls[:100]:
            if candidate['word'] in searched_word_ls:
                # Already crawled — drop the duplicate without scheduling.
                to_search_data_ls.remove(candidate)
                continue
            searched_word_ls.append(candidate['word'])
            # Persist the word immediately so a crash does not re-crawl it.
            with lock:
                with open("./data/searchedWord.txt", "a", encoding="utf-8") as f:
                    f.write(" " + candidate['word'])
            # args must be a tuple — hence the trailing comma.
            threads.append(threading.Thread(target=single_thread, args=(candidate,)))

        # Start every worker, then wait for the whole round to finish.
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()

        # Brief pause between rounds to avoid hammering the site.
        sleep(0.5)
