import pandas as pd
import jieba
import re

from sklearn.naive_bayes import GaussianNB
from sklearn.model_selection import train_test_split
import joblib

# Matches one or more common CJK Unified Ideographs (U+4E00..U+9FA5);
# used to keep only tokens that contain at least one Chinese character.
zhPattern = re.compile(r'[\u4e00-\u9fa5]+')


def dict_join(csv_path='mycsv.csv', dict_path='dic_data.dict'):
    """Build a token vocabulary from the 'content' column of *csv_path*.

    Each row is segmented with jieba; unique tokens that contain at least
    one CJK character are kept in first-seen order and written to
    *dict_path* as a single comma-separated line.

    Args:
        csv_path: input CSV with a 'content' column (default 'mycsv.csv').
        dict_path: output vocabulary file (default 'dic_data.dict').
    """
    data = pd.read_csv(csv_path, encoding='utf-8')

    vocab = []   # vocabulary, in first-seen order
    seen = set()  # O(1) membership test (the original scanned a list: O(n) per token)

    for text in data['content']:  # 提取向量字典 (extract the vector dictionary)
        try:
            for token in jieba.cut(text):
                # Keep only unseen tokens that contain a Chinese character.
                if token not in seen and zhPattern.search(token) is not None:
                    seen.add(token)
                    vocab.append(token)
        except Exception as e:
            # Best-effort: skip rows jieba cannot segment (e.g. NaN cells).
            print(e)

    with open(dict_path, 'w', encoding='utf-8') as fp:
        fp.write(','.join(vocab))


def create_data(csv_path='mycsv.csv', dict_path='dic_data.dict', out_path='resrt.csv'):
    """Vectorize the corpus into word-count features and write them to CSV.

    For every row of *csv_path*, counts how often each vocabulary token
    occurs in the 'content' cell (substring count — kept identical to the
    original behavior, NOT a tokenized count) and appends the row's
    'types' label as the final column.

    Args:
        csv_path: input CSV with 'content' and 'types' columns.
        dict_path: vocabulary file produced by dict_join().
        out_path: destination CSV for the feature matrix.

    Returns:
        The feature DataFrame that was written to *out_path*.
    """
    with open(dict_path, 'r', encoding='utf-8') as fp:
        vocab = fp.read().split(',')

    frame = pd.read_csv(csv_path)

    header = vocab + ['types']  # 表头 (header row)
    rows = []
    for text, label in zip(frame['content'], frame['types']):
        text = str(text)  # guard against NaN / non-string cells
        # Substring counts for every vocabulary token, then the label.
        row = [text.count(token) for token in vocab]
        row.append(label)
        rows.append(row)

    data_df = pd.DataFrame(rows, columns=header)

    # index=False: the original wrote the row index as an extra unnamed
    # column, which downstream training then picked up as a bogus feature.
    data_df.to_csv(out_path, index=False)
    return data_df


def create_model(data_path='resrt.csv', model_path='bytes_model_two.pkl'):
    """Train a Gaussian Naive Bayes classifier on the feature CSV.

    The last column of *data_path* is the label ('types'); every other
    column is a word-count feature. The fitted model is persisted to
    *model_path* with joblib.

    Args:
        data_path: feature CSV produced by create_data() (default 'resrt.csv').
        model_path: destination file for the pickled model.
    """
    pf = pd.read_csv(data_path)

    # Drop any pandas index column that to_csv() wrote ('Unnamed: 0');
    # otherwise the row index leaks into the feature matrix as a bogus
    # predictor. No-op when the CSV was written with index=False.
    pf = pf.loc[:, ~pf.columns.str.startswith('Unnamed')]

    x = pf.iloc[:, :-1]  # features: all word-count columns
    y = pf.iloc[:, -1]   # label: trailing 'types' column

    # NOTE(review): split is unseeded, so each run trains on a different
    # 80% subset — pass random_state to train_test_split if reproducibility
    # is needed.
    x_train, x_test, y_train, y_test = train_test_split(x, y, train_size=0.8)

    clf = GaussianNB()
    clf.fit(x_train, y_train)

    joblib.dump(clf, model_path)


# Run the training step only when executed as a script, not on import.
if __name__ == "__main__":
    create_model()