import re
import zipfile
from collections import Counter

import chardet
import jieba
import nltk
import pandas as pd
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer

# Download the NLTK resources used below (no-op if already installed):
# stopword lists, the WordNet lemmatizer data, and the punkt tokenizer models.
nltk.download('stopwords')
nltk.download('wordnet')
nltk.download('punkt')

# Debug aid: show the directories NLTK searches for its data files.
print(nltk.data.path)

# Stop-word sets used to filter tokens in count_words().
# NOTE(review): the 'chinese' stopword list is only present in newer NLTK
# data releases — confirm it exists in the deployed environment.
chinese_stopwords = set(stopwords.words('chinese'))
english_stopwords = set(stopwords.words('english'))

# WordNet-based lemmatizer for normalising English tokens.
lemmatizer = WordNetLemmatizer()

def extract_text_from_zip(zip_path):
    """Read every ``.txt`` member of a zip archive and decode it to text.

    Args:
        zip_path: Path to the zip archive.

    Returns:
        dict mapping each ``.txt`` member name to its decoded contents.

    Encoding is guessed with chardet, then falls back to GBK and finally
    latin1 (which maps every byte, so decoding always succeeds).
    """
    texts = {}
    with zipfile.ZipFile(zip_path, 'r') as zip_ref:
        for file_name in zip_ref.namelist():
            if not file_name.endswith('.txt'):
                continue
            with zip_ref.open(file_name) as file:
                raw_data = file.read()
            detected = chardet.detect(raw_data)['encoding']
            # BUG FIX: chardet returns None for empty/undetectable data, and
            # bytes.decode(None) raises TypeError (not UnicodeDecodeError),
            # which the old code did not catch. Skip None and walk a fallback
            # chain instead; latin1 at the end can never fail.
            for encoding in (detected, 'gbk', 'latin1'):
                if encoding is None:
                    continue
                try:
                    texts[file_name] = raw_data.decode(encoding)
                    break
                except UnicodeDecodeError:
                    continue
    return texts

def extract_chinese_and_english(text):
    """Separate *text* into its Chinese and its English parts.

    Args:
        text: Arbitrary mixed-language string.

    Returns:
        Tuple ``(chinese, english)`` where *chinese* is all CJK ideographs
        (U+4E00–U+9FFF) concatenated with no separator, and *english* is all
        ASCII-letter runs joined by single spaces.
    """
    han_runs = re.findall(r'[\u4e00-\u9fff]+', text)
    latin_runs = re.findall(r'[a-zA-Z]+', text)
    return ''.join(han_runs), ' '.join(latin_runs)

def count_words(text, language='chinese'):
    """Tokenize *text* and count word frequencies.

    Args:
        text: Input text (already separated into one language).
        language: ``'chinese'`` uses jieba segmentation; anything else uses
            NLTK tokenization plus WordNet lemmatization.

    Returns:
        dict mapping word -> occurrence count. Stop words and tokens of
        length <= 1 are excluded; English tokens are lower-cased.
    """
    if language == 'chinese':
        words = [word for word in jieba.cut(text)
                 if word not in chinese_stopwords and len(word) > 1]
    else:
        words = [lemmatizer.lemmatize(word.lower())
                 for word in nltk.word_tokenize(text)
                 # NOTE: length filter applies to the raw token, as before.
                 if word.lower() not in english_stopwords and len(word) > 1]
    # Idiom fix: collections.Counter replaces the hand-rolled dict loop.
    return dict(Counter(words))

def save_to_csv(word_count, output_file, encoding='utf-8'):
    """Persist a word-frequency mapping as a two-column CSV.

    Args:
        word_count: Mapping of word -> count.
        output_file: Destination CSV path.
        encoding: Text encoding for the output file (default UTF-8).

    The CSV has header ``Word,Count`` and no index column.
    """
    rows = list(word_count.items())
    frame = pd.DataFrame(rows, columns=['Word', 'Count'])
    frame.to_csv(output_file, index=False, encoding=encoding)

def main(zip_path):
    """Run the full pipeline over one archive.

    For each ``.txt`` member of *zip_path*, split its text into Chinese and
    English parts, count word frequencies per language, and write the results
    to ``<base>_chinese.csv`` and ``<base>_english.csv`` in the working
    directory.
    """
    for file_name, text in extract_text_from_zip(zip_path).items():
        zh_text, en_text = extract_chinese_and_english(text)
        base_name = file_name.rsplit('.', 1)[0]
        save_to_csv(count_words(zh_text, 'chinese'),
                    f'{base_name}_chinese.csv', encoding='utf-8')
        save_to_csv(count_words(en_text, 'english'),
                    f'{base_name}_english.csv', encoding='utf-8')

if __name__ == "__main__":
    # Script entry point. NOTE(review): the input archive path is hard-coded
    # to one machine — consider taking it from sys.argv.
    main(r'E:\Desktop\作业素材\python与自然语言处理\PPT\books.zip')