# import torch
# import torch.nn as nn
# import numpy as np
# from sklearn.datasets import make_classification
# import torch.optim as optim
# import torch.nn.functional as F
# from torch.utils.data import TensorDataset, DataLoader
#
# x, y = make_classification(n_samples=500,  # 样本数量
#                            n_features=4,  # 特征数量
#                            n_informative=4,  # 有效特征数量
#                            n_redundant=0,  # 冗余特征数量
#                            n_clusters_per_class=1,  # 每个类别的簇数量
#                            n_classes=10,
#                            random_state=42)  # 随机种子
# x = torch.tensor(x, dtype=torch.float32)
# y = torch.tensor(y, dtype=torch.long)
#
# x_test, y_test = make_classification(n_samples=200,  # 样本数量
#                            n_features=4,  # 特征数量
#                            n_informative=4,  # 有效特征数量
#                            n_redundant=0,  # 冗余特征数量
#                            n_clusters_per_class=1,  # 每个类别的簇数量
#                            n_classes=10,
#                            random_state=42)  # 随机种子
# x_test = torch.tensor(x_test, dtype=torch.float32)
# y_test = torch.tensor(y_test, dtype=torch.long)
#
#
# class MyModel(nn.Module):
#     def __init__(self):
#         super(MyModel, self).__init__()
#         self.fc1 = nn.Linear(in_features=4, out_features=128)
#         self.fc2 = nn.Linear(in_features=128, out_features=64)
#         self.fc3 = nn.Linear(in_features=64, out_features=32)
#         self.fc4 = nn.Linear(in_features=32, out_features=10)
#
#     def forward(self, x):
#         x = F.relu(self.fc1(x))
#         x = F.relu(self.fc2(x))
#         x = F.relu(self.fc3(x))
#         x = self.fc4(x)
#         return x
#
#
# model = MyModel()
#
# criterion = nn.CrossEntropyLoss()  # 交叉熵损失函数，常用于分类任务
# optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
# epochs = 50
#
# # 创建数据集
# dataset = TensorDataset(x, y)
# dataloader = DataLoader(dataset, batch_size=32, shuffle=True)
#
# for epoch in range(epochs):
#     model.train()
#     for inputs, targets in dataloader:
#         optimizer.zero_grad()
#         output = model(inputs)
#         loss = criterion(output, targets)
#         loss.backward()
#         optimizer.step()
#     print(f'Epoch [{epoch + 1}/{epochs}], Loss: {loss.item():.4f}')
#
# model.eval()
# with torch.no_grad():
#     output = model(x_test)
#     loss = criterion(output, y_test)
#     print(f'Test Loss: {loss.item():.4f}')

# import numpy as np
#
# array=['a','b','c']
#
# np.save("./data/array.npy",array)
#
# load_array = np.load("./data/array.npy")
# print(load_array)
# print(load_array[0])

# import csv
#
# # 创建一个字典（注意：JSON 不支持复杂类型，如函数、自定义对象等）
# my_dict = {"key1": "value1", "key2": 123, "key3": [1, 2, 3]}
#
# # 使用 CSV 保存字典到文件
# with open("./data/my_dict.csv", "w", newline="") as f:
#     writer = csv.writer(f)
#     for key, value in my_dict.items():
#         writer.writerow([key, value])
#
# print("字典已保存到 my_dict.csv")

# # 使用 CSV 从文件加载字典
# loaded_dict = {}
# with open("my_dict.csv", "r") as f:
#     reader = csv.reader(f)
#     for row in reader:
#         key, value = row
#         loaded_dict[key] = value  # 如果需要，可以将值转换为适当类型
#
# print("从文件加载的字典：", loaded_dict)

# from datasets import load_dataset
# import numpy as np
#
# dataset = load_dataset("Congliu/Chinese-DeepSeek-R1-Distill-data-110k")
#
# input_sentence=dataset["train"][:5]['input']
# output_sentence=dataset["train"][:5]['content']
#
# np.save("./dialog/input_sentence.npy", input_sentence)
# np.save("./dialog/output_sentence.npy", output_sentence)

# import numpy as np
#
# input_sentence=np.load("./dialog/input_sentence.npy")
# output_sentence=np.load("./dialog/output_sentence.npy")
#
# print(input_sentence[0])
# print(output_sentence[0])

import os

import numpy as np
from datasets import load_dataset

dataset = load_dataset("OpenStellarTeam/Chinese-SimpleQA")

input_sentence = dataset["train"][:200]['question']
output_sentence = dataset["train"][:200]['answer']

np.save("./dialog/input_simpleQA.npy", input_sentence)
np.save("./dialog/output_simpleQA.npy", output_sentence)

# import numpy as np
# input_sentence=np.load("./dialog/input_simpleQA.npy").tolist()
# output_sentence=np.load("./dialog/output_simpleQA.npy").tolist()
# print(input_sentence)
# print(output_sentence)

# -*- coding:utf-8 -*-
# @Time   : 2025-01-01
# @Author : Carl_DJ


# from gtts import gTTS
# import os
#
# text = "Hello,my Dears"
# tts = gTTS(text=text, lang='en')
# tts.save("output.mp3")
# os.system("start output.mp3")

# from transformers import BertTokenizer
# import jieba
#
# # 加载预训练的 BERT 分词器
# tokenizer = BertTokenizer.from_pretrained('bert-base-multilingual-cased')
#
# # 示例文本
# text = "你好，你的名字，从前有座山，山上有座庙，庙里有两个和尚World"
#
# all_words=[]
# result=jieba.cut(text)
# all_words.extend(result)
# print(all_words)
#
# # 使用分词器切词
# tokens = tokenizer.tokenize(text)
# print("Tokens:", tokens)
#
# # 获取 Token IDs
# token_ids = tokenizer.convert_tokens_to_ids(tokens)
# print("Token IDs:", token_ids)

