from collections import Counter

import jieba
import pandas as pd


# Split an English text into sentences and count the words in each sentence.
def split_txt_english(path):
    """Split an English text file into sentences and write them to 'english.csv'.

    Each line of the file is split on '.' into sentence fragments; blank
    fragments are dropped.  The CSV has three columns:
      sentence -- the sentence text
      word     -- the list of space-separated tokens of the sentence
      length   -- the number of words in the sentence

    :param path: path of the UTF-8 text file to read
    :return: None (side effect: writes 'english.csv' in the working directory)
    """
    sentences = []
    with open(path, encoding='utf-8') as file:
        for raw_line in file:
            # strip() removes the trailing newline so it is never glued onto
            # the last sentence (and never causes a whole line to be dropped).
            for fragment in raw_line.strip().split('.'):
                fragment = fragment.strip()
                if fragment:  # skip empty fragments from trailing periods
                    sentences.append(fragment)

    words = [sentence.split(' ') for sentence in sentences]
    # length is the WORD count of each sentence, not its character count.
    lengths = [len(tokens) for tokens in words]

    result = {"sentence": sentences, "word": words, "length": lengths}
    sentence_dataframe = pd.DataFrame.from_dict(result)
    sentence_dataframe.to_csv('english.csv', index_label='index')


# Split a Chinese text into sentences and count the words in each sentence.
def split_txt_chinese(path):
    """Split a Chinese text file into sentences and write them to
    'chinese_sentence.csv'.

    Each line of the file is split on the Chinese full stop '。'; blank
    fragments are dropped.  The CSV has three columns:
      sentence -- the sentence text
      word     -- the jieba token list of the sentence
      length   -- the number of jieba tokens in the sentence

    :param path: path of the UTF-8 text file to read
    :return: None (side effect: writes 'chinese_sentence.csv')
    """
    sentences = []
    with open(path, encoding='utf-8') as file:
        for raw_line in file:
            # strip() removes the trailing newline so it is never glued onto
            # the last sentence (and never causes a whole line to be dropped).
            for fragment in raw_line.strip().split('。'):
                fragment = fragment.strip()
                if fragment:  # skip empty fragments from trailing stops
                    sentences.append(fragment)

    words = [jieba.lcut(sentence) for sentence in sentences]
    # length is the WORD (token) count of each sentence, not its char count.
    lengths = [len(tokens) for tokens in words]

    result = {"sentence": sentences, "word": words, "length": lengths}
    sentence_dataframe = pd.DataFrame.from_dict(result)
    sentence_dataframe.to_csv('chinese_sentence.csv', index_label="index")



# Count Chinese word frequencies.
def count_chinese(path):
    """Count word frequencies of a Chinese text file.

    Tokenizes the whole file with jieba, counts each token, and writes the
    counts (most frequent first) to 'chinese_words.csv' with columns
    'word' and 'count'.

    :param path: path of the UTF-8 text file to read
    :return: the resulting pandas DataFrame (also written to CSV)
    """
    with open(path, encoding='utf-8') as file:
        txt = file.read()

    # Counter.most_common() sorts by count descending with stable
    # first-seen order for ties — same ordering as a manual stable
    # sort with reverse=True.
    ranked = Counter(jieba.cut(txt)).most_common()

    result = {
        "word": [word for word, _ in ranked],
        "count": [count for _, count in ranked],
    }
    word_count_dataframe = pd.DataFrame.from_dict(result)
    word_count_dataframe.to_csv('chinese_words.csv', index_label="index")
    return word_count_dataframe


# Count English word frequencies.
def count_english(path):
    """Count word frequencies of an English text file.

    Tokenizes the file on whitespace, counts each token, and writes the
    counts (most frequent first) to 'english_words.csv' with columns
    'word' and 'count'.

    :param path: path of the UTF-8 text file to read
    :return: the resulting pandas DataFrame (also written to CSV)
    """
    with open(path, encoding='utf-8') as file:
        txt = file.read()

    # str.split() with no argument splits on ANY whitespace run and drops
    # empty tokens; splitting on ' ' alone left newlines glued to words
    # (counting "a\nb" as one token) and produced empty-string tokens.
    ranked = Counter(txt.split()).most_common()

    result = {
        "word": [word for word, _ in ranked],
        "count": [count for _, count in ranked],
    }
    word_count_dataframe = pd.DataFrame.from_dict(result)
    word_count_dataframe.to_csv('english_words.csv', index_label="index")
    return word_count_dataframe

if __name__ == "__main__":
    # Run the demo only when executed as a script, not when imported.
    count_english('data/ted.txt')

