# ==============================
# 中文情感分类：100 正面 + 100 负面（自定义数据）
# 目标：训练轻量模型 → TFLite → Android
# ==============================
import json
import os
from collections import Counter

import numpy as np
import tensorflow as tf

# --- Step 1: 构建 200 条训练数据 ---
# 100 positive-sentiment sentences (Chinese). The label construction below
# assumes this list and `negative_texts` each hold exactly 100 entries —
# keep the counts in sync if you edit the data.
positive_texts = [
    "这个手机真好用", "电影太精彩了", "服务非常周到", "产品质量很棒", "体验感非常好",
    "价格很实惠", "物流很快", "客服态度好", "画面清晰流畅", "音质特别好",
    "电池续航强", "拍照效果惊艳", "系统运行流畅", "设计时尚美观", "屏幕色彩鲜艳",
    "操作简单易懂", "功能非常齐全", "性价比超高", "包装很精致", "售后响应迅速",
    "安装过程顺利", "使用感受极佳", "推荐给朋友了", "完全超出预期", "值得购买",
    "性能强劲稳定", "散热效果不错", "指纹识别灵敏", "人脸识别准确", "充电速度很快",
    "耳机音质清晰", "游戏体验流畅", "看视频很享受", "文字显示锐利", "触控反应灵敏",
    "外观高端大气", "手感非常舒适", "重量刚刚好", "携带方便轻巧", "适合日常使用",
    "学习办公利器", "孩子很喜欢用", "父母也能轻松上手", "更新及时安全", "兼容性很好",
    "没有广告干扰", "界面简洁清爽", "启动速度快", "多任务切换顺滑", "存储空间充足",
    "扩展性强", "连接稳定可靠", "蓝牙配对迅速", "Wi-Fi信号强劲", "通话质量清晰",
    "麦克风收音好", "扬声器声音洪亮", "震动反馈舒适", "按键手感扎实", "接口丰富实用",
    "配件质量上乘", "说明书详细易懂", "环保材料制作", "做工精细无瑕疵", "品牌值得信赖",
    "购物体验愉快", "下单后秒发货", "快递小哥态度好", "商品与描述一致", "没有色差问题",
    "尺寸刚刚合适", "颜色非常好看", "材质手感高级", "耐用不易损坏", "清洁起来方便",
    "节能省电环保", "静音效果出色", "适合家庭使用", "适合学生党", "适合上班族",
    "适合老年人", "适合玩游戏", "适合追剧", "适合拍照", "适合旅行携带",
    "送礼很有面子", "节日促销划算", "会员权益丰富", "积分兑换方便", "退换货流程简单",
    "客服专业耐心", "问题解决迅速", "回访服务贴心", "用户反馈被重视", "持续优化改进",
    "社区氛围友好", "教程资源丰富", "开发者支持到位", "生态联动顺畅", "跨设备协同好"
]

# 100 negative-sentiment sentences (Chinese); the counterpart of
# `positive_texts` above — must also hold exactly 100 entries to match
# the hard-coded label counts below.
negative_texts = [
    "太差了完全不值", "垃圾产品", "客服不理人", "屏幕有坏点", "充电特别慢",
    "根本不能用", "虚假宣传", "做工粗糙", "系统老是卡顿", "后悔买了",
    "电池掉电飞快", "拍照模糊不清", "音质像收音机", "物流慢到离谱", "包装破损严重",
    "收到货就坏了", "和图片完全不符", "颜色丑到爆", "尺寸根本不对", "材质廉价感强",
    "按键松松垮垮", "屏幕反光严重", "触控经常失灵", "指纹识别失败", "人脸识别无效",
    "充电口接触不良", "耳机插孔松动", "扬声器有杂音", "麦克风无法收音", "Wi-Fi总是断连",
    "蓝牙连不上", "系统频繁死机", "应用闪退严重", "发热烫手", "续航撑不过半天",
    "摄像头对焦慢", "前置美颜过度", "后置夜景全黑", "视频播放卡顿", "文字显示模糊",
    "操作逻辑混乱", "界面广告满天飞", "预装软件太多", "无法卸载流氓软件", "隐私泄露风险",
    "更新后更卡了", "兼容性极差", "配件质量低劣", "说明书看不懂", "售后推三阻四",
    "客服态度恶劣", "问题拖一周不解决", "退货还要自己付运费", "商品缺斤少两", "赠品是假货",
    "促销价格虚高", "会员权益缩水", "积分无法使用", "虚假好评刷单", "差评被删除",
    "物流信息不更新", "快递丢件不赔偿", "客服机器人敷衍", "人工客服难找", "投诉无门",
    "产品有异味", "边框掉漆严重", "屏幕有划痕", "电池鼓包危险", "充电器发热严重",
    "数据线用三天就坏", "耳机线材易断", "包装简陋像地摊货", "品牌信誉扫地", "再也不买",
    "劝大家避雷", "纯属智商税", "浪费钱", "浪费时间", "体验极差",
    "完全不符合描述", "参数造假", "虚假发货", "空包裹", "以次充好",
    "售后服务形同虚设", "维修费用高昂", "保修期形同虚设", "质量问题频发", "安全隐患大",
    "不适合任何人", "买来就后悔", "朋友劝我别买", "网上差评一堆", "实体店都不卖",
    "系统漏洞百出", "病毒广告弹窗", "自动下载垃圾软件", "偷跑流量", "偷用电量"
]

# Merge both classes into a single training set.
train_texts = positive_texts + negative_texts
# 1 = positive, 0 = negative. Derive the counts from the lists themselves so
# the labels can never drift out of sync if the data above is edited.
train_labels = [1] * len(positive_texts) + [0] * len(negative_texts)

# Two held-out sanity-check samples (never shown to the model during training).
test_texts = ["这个平板电脑性能很强", "客服态度极差，问题没人管"]
test_labels = [1, 0]

print(f"✅ 训练数据: {len(train_texts)} 条 ({len(positive_texts)} 正 + {len(negative_texts)} 负)")
print(f"示例正面: \"{positive_texts[0]}\"")
print(f"示例负面: \"{negative_texts[0]}\"")

# --- Step 2: build a character-level vocabulary ---
# Collect every character that appears in the training sentences.
all_chars = []
for text in train_texts:
    all_chars.extend(list(text))  # split each sentence into single characters

# Index 0 is reserved for [PAD] and index 1 for [UNK]; the remaining
# characters are sorted so the char → id mapping is deterministic across
# runs (a set alone has no guaranteed order).
vocab_chars = ["[PAD]", "[UNK]"] + sorted(set(all_chars))
char_to_id = {char: idx for idx, char in enumerate(vocab_chars)}
vocab_size = len(vocab_chars)

print(f"✅ 词表大小: {vocab_size} (含 [PAD], [UNK])")

# Persist the vocabulary for the Android client — it must use the exact
# same mapping as the model, so ensure_ascii=False keeps the keys readable.
with open("chinese_vocab.json", "w", encoding="utf-8") as f:
    json.dump(char_to_id, f, ensure_ascii=False, indent=2)


# --- Step 3: encode text into fixed-length ID sequences ---
max_len = 32  # maximum sequence length in characters

def text_to_ids(text, char_to_id, max_len):
    """Map *text* to a list of exactly *max_len* character IDs.

    Characters beyond *max_len* are truncated, characters missing from the
    vocabulary map to the [UNK] ID, and short sequences are right-padded
    with the [PAD] ID (looked up from the vocabulary rather than assuming
    it is 0, so the function stays correct if the vocab layout changes).
    """
    unk_id = char_to_id["[UNK]"]
    pad_id = char_to_id.get("[PAD]", 0)
    ids = [char_to_id.get(char, unk_id) for char in text[:max_len]]
    ids += [pad_id] * (max_len - len(ids))
    return ids

# Vectorize both splits into integer tensors the model can consume.
def _encode_batch(texts):
    """Encode a list of sentences into a (len(texts), max_len) int array."""
    return np.array([text_to_ids(t, char_to_id, max_len) for t in texts])

x_train = _encode_batch(train_texts)
y_train = np.array(train_labels)
x_test = _encode_batch(test_texts)
y_test = np.array(test_labels)

print(f"✅ 输入张量形状: {x_train.shape}")

# --- Step 4: build a lightweight classifier ---
# Embedding → mean pooling → small dense head: tiny and TFLite-friendly.
model = tf.keras.Sequential([
    # NOTE: the `input_length` argument was deprecated and then removed in
    # Keras 3 (TF >= 2.16) — passing it raises an error there. The sequence
    # length is fixed by the (N, max_len) input arrays instead.
    tf.keras.layers.Embedding(vocab_size, 16),
    tf.keras.layers.GlobalAveragePooling1D(),
    tf.keras.layers.Dense(16, activation='relu'),
    tf.keras.layers.Dropout(0.4),  # heavy dropout: only 200 training samples
    tf.keras.layers.Dense(1, activation='sigmoid')  # outputs P(positive)
])

model.compile(
    optimizer='adam',
    loss='binary_crossentropy',  # binary labels: 1 = positive, 0 = negative
    metrics=['accuracy']
)

print("\n✅ 模型结构:")
model.summary()

# --- Step 5: train the model ---
print("\n🚀 开始训练...")
fit_config = dict(epochs=30, batch_size=16, validation_split=0.1, verbose=1)
history = model.fit(x_train, y_train, **fit_config)

# --- Step 6: accuracy on the held-out samples ---
test_loss, test_acc = model.evaluate(x_test, y_test, verbose=0)
print(f"\n✅ 测试准确率: {test_acc:.2f}")

# --- Step 7: spot-check predictions on fresh sentences ---
def predict_text(text):
    """Classify one sentence; returns ("正面"|"负面", probability-of-positive)."""
    encoded = np.array([text_to_ids(text, char_to_id, max_len)])
    prob = model.predict(encoded, verbose=0)[0][0]
    return ("正面", prob) if prob > 0.5 else ("负面", prob)

print("\n🔍 预测测试:")
# Unseen sentences, alternating positive/negative, to eyeball generalization.
test_cases = [
    "这个手机拍照效果太棒了",
    "客服态度恶劣，完全不解决问题",
    "物流超快，第二天就到了",
    "屏幕有坏点，要求退货被拒",
    "音质清晰，低音澎湃",
    "系统卡顿到无法使用"
]

for sample in test_cases:
    verdict, score = predict_text(sample)
    print(f"\"{sample}\" → {verdict} ({score:.4f})")

# --- Step 8: export the TFLite model ---
# NOTE(review): this export is repeated almost verbatim near the end of the
# file (the second copy writes next to the script via SCRIPT_DIR, this one
# writes to the current working directory). Consider keeping only one.
print("\n📦 正在导出 TFLite 模型...")
converter = tf.lite.TFLiteConverter.from_keras_model(model)
# DEFAULT enables post-training quantization to shrink the model for mobile.
converter.optimizations = [tf.lite.Optimize.DEFAULT]
tflite_model = converter.convert()

with open("chinese_sentiment.tflite", "wb") as f:
    f.write(tflite_model)

print(f"✅ TFLite 模型大小: {len(tflite_model) / 1024:.1f} KB")

# --- Step 9: download files (Colab only) ---
# Uncomment when running in Google Colab to download the artifacts:
# from google.colab import files
# print("\n📥 准备下载文件...")
# files.download("chinese_sentiment.tflite")
# files.download("chinese_vocab.json")

# Save the vocabulary next to this script. `__file__` is undefined in
# notebooks/REPLs (e.g. Colab, which the commented code above targets),
# so fall back to the current working directory there.
try:
    SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
except NameError:
    SCRIPT_DIR = os.getcwd()
VOCAB_PATH = os.path.join(SCRIPT_DIR, "chinese_vocab.json")
with open(VOCAB_PATH, "w", encoding="utf-8") as f:
    json.dump(char_to_id, f, ensure_ascii=False, indent=2)
print(f"✅ 词表已保存: {VOCAB_PATH}")

# --- Export TFLite next to the script for Android integration ---
print("\n📦 导出 TFLite 模型...")
tflite_converter = tf.lite.TFLiteConverter.from_keras_model(model)
tflite_converter.optimizations = [tf.lite.Optimize.DEFAULT]
tflite_model = tflite_converter.convert()

TFLITE_PATH = os.path.join(SCRIPT_DIR, "chinese_sentiment.tflite")
with open(TFLITE_PATH, "wb") as out:
    out.write(tflite_model)

print(f"✅ TFLite 模型已保存: {TFLITE_PATH}")
print(f"📦 模型大小: {len(tflite_model) / 1024:.1f} KB")

print("\n🎉 完成！请将两个文件用于 Android 集成。")