import tensorflow as tf
import numpy as np
import json
from collections import Counter

# 1. Build the training data (10 positive and 10 negative review snippets).

positive_text = [
    "这个手机太好用了","电影太精彩了", "服务非常周到", "产品质量很棒", "体验感非常好",
    "价格很实惠", "物流很快", "客服态度好", "画面清晰流畅", "音质特别好"
]

negative_text = [
    "这个手机太难用了", "垃圾产品", "客服不理人", "屏幕有坏点", "充电特别慢",
    "根本不能用", "虚假宣传", "做工粗糙", "系统老是卡顿", "后悔买了"
]


train_text = positive_text + negative_text
# BUG FIX: original was `[10]*1 + [10]*0`, which produces the single-element
# list [10] — not one label per sample. Each positive sample gets label 1 and
# each negative sample gets label 0, aligned with the order of train_text.
train_label = [1] * len(positive_text) + [0] * len(negative_text)


# 2. Build a character-level vocabulary from the training texts.

# Flatten every training sentence into one long list of characters.
all_char = []
for text in train_text:
    all_char.extend(text)

print(all_char)

# Per-character frequency counts (currently only computed, not consumed).
char_counts = Counter(all_char)

print(all_char)
seted_char = set(all_char)
print(seted_char)
sorted_char = sorted(seted_char)
print(sorted_char)

# Reserve the special tokens up front: index 0 = padding, index 1 = unknown.
vocab_chars = ["[PAD]", "[UNK]"] + sorted_char
print(vocab_chars)

# Map each character to its integer id (position in vocab_chars).
char_to_id = {}
for idx, ch in enumerate(vocab_chars):
    char_to_id[ch] = idx
    print(ch, idx)

print(char_to_id)

vocab_size = len(vocab_chars)
print("词表大小：", vocab_size)

# Persist the vocabulary to disk (disabled for now).
# with open("chinese_vocab2.json","w", encoding="utf-8") as f:
#     json.dump(char_to_id, f, ensure_ascii=False, indent=2)


# 3. Convert each training text to a list of character ids, truncated to the
#    first 16 characters; unseen characters map to the [UNK] id.
arr = []
for t in train_text:
    ids = [char_to_id.get(ch, char_to_id["[UNK]"]) for ch in t[:16]]
    # BUG FIX: this append was originally dedented outside the loop, so `arr`
    # ended up holding only the ids of the LAST text (and `ids` from earlier
    # iterations were silently discarded). It must run once per text.
    arr.append(ids)

print(arr)