import jieba

import common.AdapterBsAndRequest as gsw
import common.OsUtil as ost
import os
import csv
# from snownlp import SnowNLP

from pypinyin import pinyin, lazy_pinyin, Style


# from snownlp import SnowNLP

# Common module-level constants.
# STORE_PATH: directory (under the current working directory) where each
# downloaded chapter is written as "<title>.txt".
STORE_PATH = os.path.abspath('.')+"/file/"
# CONTEXT_SPLIT_CHAR: double em-dash that separates a chapter's body text
# from its trailing attribution/footer; everything after it is discarded.
CONTEXT_SPLIT_CHAR = "——"


class GuShiWenRequestItemProcess(gsw.AdapterBsAndRequest):
    """Scraper for the "Chu Ci" section of gushiwen.org.

    ``process_item`` collects the chapter links from the index page and
    stores each chapter's body text under ``STORE_PATH``;
    ``read_local_file`` then segments those files with jieba and writes a
    word/pinyin CSV.
    """

    def __init__(self, url=None):
        '''
        Initialize the scraper.
        :param url: domain of the target site, e.g. "https://www.gushiwen.org/"
        '''
        super().__init__(url)
        # hrefs of the individual chapter pages, filled by process_item()
        self.__url_word_list = []

    def process_item(self):
        '''
        Fetch the index page, collect every chapter link, then download each
        chapter and write its body text to ``STORE_PATH/<title>.txt``.
        :return: None when no chapter links were found
        '''
        bs_main_context = super().res_soup_from_request(path="guwen/chuci.aspx")
        for book_node in bs_main_context.select("body .bookcont"):
            self.__collect_links(book_node)

        if not self.__url_word_list:
            return None

        # create the output directory once, up front (original re-checked per file)
        os.makedirs(STORE_PATH, exist_ok=True)
        for url_item in self.__url_word_list:
            self.__store_chapter(url_item)

    def __collect_links(self, book_node):
        '''
        Append each chapter href found under *book_node*'s first <ul> to
        ``self.__url_word_list``.
        :param book_node: a ".bookcont" BeautifulSoup tag from the index page
        '''
        item_list = book_node.find("ul")
        if item_list is None:
            return
        for child in item_list:
            # Whitespace between <li> tags is a NavigableString (a str
            # subclass) whose .find() is str.find and returns -1 — the
            # original's "!= -1" check guarded against exactly that; skipping
            # plain-str children makes the intent explicit.
            if isinstance(child, str):
                continue
            anchor = child.find("a")
            if anchor is not None:
                href = anchor.get("href")
                if href:  # ignore anchors that carry no href attribute
                    self.__url_word_list.append(href)

    def __store_chapter(self, url_item):
        '''
        Download one chapter page and save its body text as a .txt file
        named after the page title.
        :param url_item: absolute URL of the chapter page
        '''
        child_context = super().res_soup_from_request(url=url_item)
        # title looks like "<name>、..._..." — keep the part before the first
        # "、" and, if present, before the first "_"
        title = str(child_context.title.text).replace("\n", "").split("、")[0]
        if "_" in title:
            title = title.split("_")[0].lstrip()
        # strip boilerplate suffixes from the page title
        title = title.replace("原文及翻译", "").replace("原文", "")

        detail = str(child_context.find("textarea").text)
        # drop the attribution/footer that follows the split marker
        if CONTEXT_SPLIT_CHAR in detail:
            detail = detail.split(CONTEXT_SPLIT_CHAR, 1)[0]

        # "with" guarantees the handle is closed (the original leaked it);
        # explicit UTF-8 so Chinese text round-trips on any platform
        with open(STORE_PATH + title + ".txt", "w", encoding="utf-8") as fh:
            fh.write(detail)

    def __pinyin_concat(self, lst=None):
        '''
        Map one cell of pypinyin.pinyin() output (a single-element list per
        character) to its pinyin string.
        :param lst: one cell of pypinyin output
        :return: the first element as str, or None when *lst* is not a
                 non-empty list (original raised IndexError on an empty list)
        '''
        if not isinstance(lst, list) or not lst:
            return None
        return str(lst[0])

    def read_local_file(self, folder=None):
        '''
        Segment every .txt file under *folder* with jieba and write a CSV of
        (word, tone-marked pinyin, lazy pinyin) rows to ``<folder>word.csv``.
        :param folder: directory containing the chapter .txt files
        :return: None when *folder* is missing or yields no files
        '''
        if folder is None:
            return None

        all_file_path_list = ost.OsUtil(folder).dir_scan(postfix=".txt")
        if all_file_path_list is None:
            return None

        list_word_pin_yin = []
        for item in all_file_path_list:
            rs_text = super().file_open_obj(item)
            if rs_text is None:
                continue
            for jb_item in jieba.cut(rs_text, cut_all=True, HMM=False):
                word = str(jb_item).lstrip()
                # Skip empty segments and ideographic spaces. NOTE: the
                # original compared against the literal string "u3000"
                # instead of the character "\u3000", so its filter never
                # matched anything.
                if not word or word == "\u3000":
                    continue
                # SnowNLP could not produce pinyin for some words, hence pypinyin
                list_word_pin_yin.append((
                    word,
                    " ".join(map(self.__pinyin_concat, pinyin(word))),
                    " ".join(lazy_pinyin(word)),
                ))

        # newline="" per the csv module docs — otherwise blank rows appear on
        # Windows; explicit UTF-8 for the Chinese column values
        with open(folder + 'word.csv', 'w', newline="", encoding="utf-8") as f:
            csv.writer(f).writerows(list_word_pin_yin)
        # print(list_word_pin_yin)


# item_process = GuShiWenRequestItemProcess("https://www.gushiwen.org/")
# item_process.process_item()
# item_process.read_local_file(STORE_PATH)