import os.path
import pandas as pd
import jieba
import jieba.posseg as psg
import re
import json
from nltk import word_tokenize,pos_tag
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
# Shared WordNet lemmatizer instance, used to lemmatize English nouns below.
wnl = WordNetLemmatizer()

# User dictionary of extra words loaded into jieba before segmenting Chinese text.
word_list_path = "assist_data/add_word_list.txt"
# Chinese stopword list, one word per line; tokenization degrades gracefully if absent.
stop_word_list_path = "assist_data/stop_word_list.txt"
jieba_flag_list = ['n', 'nz', 'vn']   # jieba POS tags to keep: noun, proper noun, verbal noun
nltk_flag_list = ['NN', 'NNS']        # NLTK POS tags to keep: singular and plural nouns


def chinese_word_cut(text):
    """Segment Chinese *text* with jieba and keep noun-like tokens.

    Loads the user dictionary, POS-tags the text with jieba.posseg, strips
    all non-CJK characters from each token, and keeps tokens that are at
    least two characters long, not in the stopword list, and whose POS flag
    is in ``jieba_flag_list``.

    Args:
        text: Raw Chinese text to tokenize.

    Returns:
        A single space-joined string of the kept tokens.
    """
    jieba.load_userdict(word_list_path)
    jieba.initialize()

    # Load stopwords into a set for O(1) membership tests; a missing file
    # simply means no stopword filtering (best-effort, as before).
    try:
        with open(stop_word_list_path, encoding="UTF-8") as f:
            stop_set = {re.sub(u'\n|\r', '', line) for line in f}
    except FileNotFoundError:
        stop_set = set()

    word_list = []
    # jieba POS-tagged segmentation
    for seg_word in psg.cut(text):
        # Keep only CJK unified ideographs (drops digits, latin, punctuation).
        word = re.sub(u'[^\u4e00-\u9fa5]', '', seg_word.word)
        # NOTE: the length check is now independent of the stopword list, so
        # short/empty tokens are dropped even when the stopword file is missing
        # (the original only applied it while scanning a non-empty stop list).
        if len(word) < 2 or word in stop_set:
            continue
        if seg_word.flag in jieba_flag_list:
            word_list.append(word)
    return " ".join(word_list)


def english_word_cut(text):
    """Tokenize English *text* and return its lemmatized nouns, space-joined.

    Lowercases the text, tokenizes it, drops punctuation tokens and English
    stopwords, POS-tags the remainder, and keeps noun tokens (tags in
    ``nltk_flag_list``) that do not look like URL fragments, lemmatizing each
    as a noun.
    """
    # Punctuation tokens to discard after tokenization.
    punctuation = {',', '.', ':', ';', '?', '(', ')', '[', ']', '&', '!', '*', '@', '#', '$', '%'}
    stops = set(stopwords.words("english"))

    tokens = word_tokenize(text.lower())
    # Remove punctuation tokens, then stopwords.
    tokens = [tok for tok in tokens if tok not in punctuation]
    tokens = [tok for tok in tokens if tok not in stops]

    nouns = []
    for token, tag in pos_tag(tokens):
        # Keep nouns only, skipping anything that looks like a URL fragment.
        if tag in nltk_flag_list and 'http' not in token and '/' not in token:
            nouns.append(wnl.lemmatize(token, 'n'))
    return " ".join(nouns)


def read_weibo_json(weibo_data_path):
    """Load a Weibo JSON-lines file into a two-column DataFrame.

    Each line of the file is a JSON object; the ``created_at`` and
    ``content`` fields are collected into the ``time`` and ``content``
    columns respectively.

    Args:
        weibo_data_path: Path to the JSON-lines file (UTF-8).

    Returns:
        pandas.DataFrame with columns ``time`` and ``content``.
    """
    data = {"time": [], "content": []}

    # `with` closes the handle (the original leaked it); iterating the file
    # streams line by line instead of materializing readlines().
    with open(weibo_data_path, encoding="UTF-8") as f:
        for line in f:
            if not line.strip():
                continue  # tolerate blank/trailing lines
            tweet = json.loads(line)
            data["time"].append(tweet["created_at"])
            data["content"].append(tweet["content"])

    return pd.DataFrame(data)


def read_twitter_json(twitter_data_path):
    """Load a Twitter JSON-lines file into a two-column DataFrame.

    Each line of the file is a JSON object; the ``tweetcreatedts`` and
    ``text`` fields are collected into the ``time`` and ``content``
    columns respectively.

    Args:
        twitter_data_path: Path to the JSON-lines file (UTF-8).

    Returns:
        pandas.DataFrame with columns ``time`` and ``content``.
    """
    data = {"time": [], "content": []}

    # `with` closes the handle (the original leaked it); iterating the file
    # streams line by line instead of materializing readlines().
    with open(twitter_data_path, encoding="UTF-8") as f:
        for line in f:
            if not line.strip():
                continue  # tolerate blank/trailing lines
            tweet = json.loads(line)
            data["time"].append(tweet["tweetcreatedts"])
            data["content"].append(tweet["text"])

    return pd.DataFrame(data)


def get_from_file(weibo_data_path, twitter_data_path, processed_data_path, lang="ch"):
    """Return a tokenized DataFrame, using an Excel file as a processing cache.

    If ``processed_data_path`` exists it is read back directly. Otherwise the
    raw JSON-lines source for the requested language is loaded, tokenized,
    filtered of empty tokenizations, sorted by time, and written to the cache.

    Args:
        weibo_data_path: Weibo JSON-lines source (used when ``lang == "ch"``).
        twitter_data_path: Twitter JSON-lines source (used otherwise).
        processed_data_path: Excel cache path, read if present, written if not.
        lang: ``"ch"`` for Chinese/Weibo; anything else means English/Twitter.

    Returns:
        pandas.DataFrame with ``time``, ``content`` and ``tokenized`` columns,
        with NaN-containing rows dropped.
    """
    if os.path.exists(processed_data_path):
        data = pd.read_excel(processed_data_path)
    else:
        if lang == "ch":
            data = read_weibo_json(weibo_data_path)
            data["tokenized"] = data.content.apply(chinese_word_cut)
        else:
            data = read_twitter_json(twitter_data_path)
            data["tokenized"] = data.content.apply(english_word_cut)

        # Drop rows whose tokenization produced nothing, sort chronologically,
        # and persist the result so later runs hit the cache branch above.
        data = data.drop(data.index[data["tokenized"] == ""])
        data = data.sort_values(by=["time"])
        data.to_excel(processed_data_path, index=False)

    return data.dropna()
