import re,json,sqlite3,copy
import jieba

import numpy as np
import tensorflow as tf
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences

def remove_punctuation(word):
    """Return *word* with every character removed that is neither a word
    character nor a CJK ideograph (U+4E00..U+9FA5).

    Punctuation, whitespace and symbols are stripped; letters, digits,
    underscores and Chinese characters are kept.
    """
    cleaned = re.sub(r'[^\u4e00-\u9fa5\w]', '', word)
    return cleaned

def txt2chapters():
    """Parse toutiao_cat_data.txt into a {category: [news titles]} dict.

    Each line of the file contains '_!_'-separated fields; field index 2
    is the category name and field index 3 is the news title.

    Returns:
        dict mapping category name to the list of its titles, in file order.
    """
    chapters = {}
    with open('toutiao_cat_data.txt', "r", encoding="utf-8") as file:
        for line in file:
            fields = line.split('_!_')
            # setdefault replaces the original if/else membership dance
            chapters.setdefault(fields[2], []).append(fields[3])
    return chapters

def db2chapters():
    """Load the {category: [news titles]} mapping from the toutiao table.

    Each row stores one category and all of its titles joined with '_!_'
    (the format written by word2db); the titles are split back here.

    Returns:
        dict mapping categorize to its list of news titles.
    """
    conn = sqlite3.connect('字词典.db')
    try:
        # read-only query: the original's commit() here was a no-op and is dropped
        rows = conn.execute("SELECT categorize,news FROM toutiao").fetchall()
    finally:
        # close even if the query raises (original leaked the connection)
        conn.close()
    chapters = {}
    for categorize, news in rows:
        # a duplicate categorize overwrites the earlier entry — same as the
        # original; word2db writes exactly one row per category
        chapters[categorize] = news.split('_!_')
    return chapters

def jieba2db(chapters):
    """Tokenize every news title with jieba (search mode) and persist
    labelled training rows into the toutiao_jieba table.

    x_train: one space-joined token string per title.
    y_train: 1 when the category is "news_tech", else 0 (binary task).

    Args:
        chapters: dict mapping category name to a list of news titles.
    """
    x_train = []
    y_train = []
    for categorize, titles in chapters.items():
        for news in titles:
            # lcut_for_search already returns a list — the original's
            # list() wrapper was redundant
            x_train.append(" ".join(jieba.lcut_for_search(news)))
            y_train.append(1 if categorize == "news_tech" else 0)
    conn = sqlite3.connect('字词典.db')
    cursor = conn.cursor()
    insert_statement = """
        INSERT INTO toutiao_jieba (x_train,y_train) VALUES (?,?)
    """
    # zip pairs samples with labels directly, replacing range(len(...)) indexing
    cursor.executemany(insert_statement, zip(x_train, y_train))
    conn.commit()
    cursor.close()
    conn.close()

def db2dataset():
    """Build a shuffled, batched tf.data.Dataset from the toutiao_jieba table.

    Returns:
        (dataset, max_sequence_length): batches of 32 (padded token-id
        sequence, integer label) pairs, and the padding length used.
    """
    conn = sqlite3.connect('字词典.db')
    try:
        # read-only query: the original's commit() was a no-op and is dropped
        rows = conn.execute("SELECT x_train,y_train FROM toutiao_jieba").fetchall()
    finally:
        conn.close()
    x_train = [row[0] for row in rows]
    y_train = [row[1] for row in rows]
    # x_train entries are space-joined jieba tokens (see jieba2db), so the
    # padding length must be the maximum TOKEN count. The original used
    # len(row[0]) — the CHARACTER length of the joined string — which
    # over-padded every sequence several-fold. default=0 keeps an empty
    # table from raising.
    max_sequence_length = max((len(text.split()) for text in x_train), default=0)
    vocab_size = 5000  # keep only the 5000 most frequent tokens
    tokenizer = Tokenizer(num_words=vocab_size)
    tokenizer.fit_on_texts(x_train)
    sequences = tokenizer.texts_to_sequences(x_train)
    padded_sequences = pad_sequences(sequences, maxlen=max_sequence_length, padding='post')
    dataset = tf.data.Dataset.from_tensor_slices((padded_sequences, y_train))
    dataset = dataset.shuffle(buffer_size=300000)
    dataset = dataset.batch(32)
    return dataset, max_sequence_length

def print_dataset_element(db2dataset, n=1):
    """Print the label and padded sequence of every sample in the first
    `n` batches produced by the dataset factory.

    Args:
        db2dataset: zero-argument callable returning a
            (tf.data.Dataset, max_sequence_length) tuple — e.g. the
            module-level db2dataset function.
        n: number of batches to print (default 1).

    Bug fixes vs. the original: db2dataset() returns a tuple, so the
    dataset must be unpacked before calling .take(); the stray recursive
    call print_dataset_element(element) raised a TypeError on its first
    iteration; and iterating a hard-coded range(32) failed on a final
    batch shorter than 32 — zip stops at the actual batch size.
    """
    dataset, _ = db2dataset()
    for x, y in dataset.take(n):
        for features, label in zip(x, y):
            print(label.numpy(), features.numpy())

def chapters2vocab(chapters):
    """Collect the set of unique jieba search-mode tokens appearing in
    any news title of any category.

    Args:
        chapters: dict mapping category name to a list of news titles.

    Returns:
        set of token strings.
    """
    vocab = set()
    for titles in chapters.values():
        for headline in titles:
            for token in jieba.lcut_for_search(headline):
                vocab.add(token)
    return vocab

def vocab2db(jieba_set):
    """Persist a vocabulary set into the toutiao_vocab table, one word
    per row.

    Args:
        jieba_set: iterable of vocabulary strings.
    """
    connection = sqlite3.connect('字词典.db')
    cur = connection.cursor()
    sql = """
        INSERT INTO toutiao_vocab (vocab) VALUES (?)
    """
    # executemany accepts a generator; each word becomes a 1-tuple row
    cur.executemany(sql, ((word,) for word in jieba_set))
    connection.commit()
    cur.close()
    connection.close()

def db2vocab():
    """Read the vocabulary back from the toutiao_vocab table.

    Returns:
        set of vocabulary strings.
    """
    conn = sqlite3.connect('字词典.db')
    try:
        # read-only query: the original's commit() was a no-op and is dropped
        rows = conn.execute("SELECT vocab FROM toutiao_vocab").fetchall()
    finally:
        # close even if the query raises (original leaked the connection)
        conn.close()
    return {row[0] for row in rows}

def create_table():
    """Create the three tables used by this pipeline (idempotent).

    toutiao:       one row per category with all titles joined by '_!_'.
    toutiao_vocab: one vocabulary word per row.
    toutiao_jieba: tokenized training sample plus binary label.

    Fixes vs. original: the connection/cursor were never closed if a
    statement raised; stray empty placeholder comments in the DDL are
    removed; the three statements run as one executescript call.
    """
    conn = sqlite3.connect('字词典.db')
    try:
        conn.executescript("""
            CREATE TABLE IF NOT EXISTS toutiao (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                categorize TEXT NOT NULL,
                news TEXT
            );
            CREATE TABLE IF NOT EXISTS toutiao_vocab (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                vocab TEXT NOT NULL
            );
            CREATE TABLE IF NOT EXISTS toutiao_jieba (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                x_train TEXT NOT NULL,
                y_train INTEGER
            );
        """)
        conn.commit()
    finally:
        conn.close()

def word2db(chapters):
    """Write each category as one row of the toutiao table, with all of
    its news titles joined by the '_!_' separator.

    Args:
        chapters: dict mapping category name to a list of news titles.
    """
    connection = sqlite3.connect('字词典.db')
    cur = connection.cursor()
    sql = """
        INSERT INTO toutiao (categorize,news) VALUES (?,?)
    """
    rows = [(name, "_!_".join(titles)) for name, titles in chapters.items()]
    cur.executemany(sql, rows)
    connection.commit()
    cur.close()
    connection.close()