#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @File  : data_preparation.py
# @Author: dongguangwen
# @Date  : 2024-06-30 22:22
import pandas as pd
from sklearn.model_selection import train_test_split
from transformers import BertTokenizer

# Company names are assumed to be stored in a CSV file with a single
# 'company_name' column.  NOTE(review): the frame is only printed here and
# never used again in this visible chunk — presumably a placeholder for
# later gazetteer/lookup logic; confirm before removing.
companies_df = pd.read_csv('companies.csv')
print(companies_df)

# 生成模拟数据
sentences = [
    "请介绍一下北京奇虎科技有限公司近几年的经济发展情况？",
    "请介绍一下奇虎科技公司近几年的经济发展情况？"
]

# 假设我们已经知道这些句子中的公司名称
labels = [
    ["O", "O", "O", "O", "O", "O", "B-COMPANY", "I-COMPANY", "I-COMPANY", "I-COMPANY", "I-COMPANY", "I-COMPANY", "O",
     "O", "O", "O", "O"],
    ["O", "O", "O", "O", "O", "O", "B-COMPANY", "I-COMPANY", "O", "O", "O", "O", "O", "O", "O"]
]

# 将数据转换为DataFrame
data = pd.DataFrame({
    'sentence': sentences,
    'labels': labels
})
print(data)

# --- Tokenize -------------------------------------------------------------
# 'bert-base-chinese' is the published Hugging Face checkpoint for Chinese
# BERT; the previous id 'bert-base-chinese-simplified' does not exist on the
# hub, so from_pretrained failed at runtime.  (The correct id was already
# present here as a commented-out alternative.)
tokenizer = BertTokenizer.from_pretrained('bert-base-chinese')
# encode() prepends [CLS] / appends [SEP] and returns token ids.
# NOTE(review): these are sub-word token ids and may not align 1:1 with the
# char-level BIO labels above — confirm alignment before training.
data['tokens'] = data['sentence'].apply(lambda x: tokenizer.encode(x, add_special_tokens=True))
print(data)

# Split into train/test sets; train_test_split was imported at the top of
# the file but previously unused (the call sat commented out).
train_data, test_data = train_test_split(data, test_size=0.2, random_state=42)