# Deep-learning-assisted extraction of weighing-scale data

import re
import numpy as np
import torch
from transformers import BertTokenizer, BertModel
from sklearn.cluster import DBSCAN

# Load the pretrained BERT tokenizer and model
# (weights are downloaded from the Hugging Face hub on first run).
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
model = BertModel.from_pretrained("bert-base-uncased")

# Sample raw scale readings (unparsed, as received from the device).
# NOTE(review): "[0A]"/"[0D]" presumably encode LF/CR control bytes from a
# serial protocol — confirm against the data source.
raw_data = [
    "sg0017.80kg",
    "wg0023.45kg",
    "[0A]ST,TR,+  0.060kg[0D]"
    # More samples...
]

def extract_features(data):
    """Encode each raw string with BERT and return its [CLS] embedding.

    Args:
        data: iterable of strings to embed.

    Returns:
        np.ndarray of shape (len(data), hidden_size), one [CLS] vector
        per input string.
    """
    vectors = []
    # Inference only — gradients are never needed here.
    with torch.no_grad():
        for sample in data:
            encoded = tokenizer(sample, return_tensors="pt")
            output = model(**encoded)
            # First token ([CLS]) of the single sequence in the batch.
            vectors.append(output.last_hidden_state[0, 0].numpy())
    return np.array(vectors)

# Embed every raw sample (one BERT [CLS] vector per string).
features = extract_features(raw_data)

# Cluster the embeddings with DBSCAN.
# NOTE(review): min_samples=1 makes every point a cluster member (no -1
# noise label); eps=0.5 may need tuning for BERT embedding distances — confirm.
clustering = DBSCAN(eps=0.5, min_samples=1).fit(features)

# Extract the weight value and unit from each sample, tagged with its cluster.
def extract_weight_and_unit(data, clusters):
    """Pull (weight, unit, cluster_id) triples out of raw scale strings.

    Args:
        data: sequence of raw text samples (e.g. "sg0017.80kg").
        clusters: per-sample cluster labels aligned with *data*
            (e.g. DBSCAN's ``labels_``).

    Returns:
        List of ``(weight, unit, cluster_id)`` tuples — ``weight`` as a
        signed float, ``unit`` as the matched unit string. Samples with
        no recognizable "<number><unit>" span are skipped.
    """
    # Optional sign (possibly separated from the digits by spaces, as in
    # "+  0.060kg"), a number with an optional fractional part, optional
    # whitespace, then a known unit. "kg" must precede "g" so the longer
    # unit wins. Compiled once, outside the loop.
    pattern = re.compile(r'([+-]?)\s*(\d+(?:\.\d+)?)\s*(kg|g|lbs|oz)')
    weight_unit_pairs = []
    for text, cluster_id in zip(data, clusters):
        match = pattern.search(text)
        if match:
            sign, value, unit = match.groups()
            # BUGFIX: the sign was previously discarded, so negative
            # readings (e.g. tare drift "-0.5kg") came back positive.
            weight_unit_pairs.append((float(sign + value), unit, cluster_id))
    return weight_unit_pairs

# Extract (weight, unit, cluster_id) triples from the raw samples and print them.
weight_units = extract_weight_and_unit(raw_data, clustering.labels_)
print(weight_units)
