
import pymysql
import json
import requests
from typing import List
import pandas as pd
from loguru import logger


class CorpusClassify(object):
    """Pull bilingual sentence pairs from MySQL, classify the non-Chinese
    side via a remote HTTP service, and write the results to a per-language
    CSV file ('<lang>_cls.csv')."""

    def __init__(self):
        # NOTE(review): credentials are hard-coded in source; move them to
        # environment variables or a config file before this ships anywhere.
        self.conn = pymysql.connect(
            host='172.18.0.66',
            port=3306,
            user='root',
            password='00123.Com',
            database='corpus',
            charset='utf8mb4',
            cursorclass=pymysql.cursors.DictCursor
        )
        self.cursor = self.conn.cursor()
        # Maps a human-readable language name to its sentence-pair table.
        self.tables = {
            '泰语': 'corpus_sentence_pairs_tai',
            '老挝语': 'corpus_sentence_pairs_lao',
            '越南语': 'corpus_sentence_pairs_vn',
            '马来语': 'corpus_sentence_pairs_malaysia',
        }
        # Remote classification endpoint.
        self.url = 'http://region-9.autodl.pro:50644/class'

    def close(self):
        """Release the DB cursor and connection (new, backward-compatible)."""
        try:
            self.cursor.close()
        finally:
            self.conn.close()

    def get_count(self, lang: str = '泰语') -> int:
        """Return the total row count of the sentence-pair table for *lang*.

        Raises KeyError if *lang* is not in self.tables.
        """
        # Table name comes from the trusted self.tables mapping, so this
        # f-string cannot be injected by external input.
        sql = f'select count(1) as CNT from {self.tables[lang]}'
        self.cursor.execute(sql)
        data = self.cursor.fetchone()
        return data['CNT']

    def fetch(self, lang: str = '泰语', start: int = 0, size: int = 1000):
        """Fetch up to *size* rows from the *lang* table starting at row
        offset *start*.

        Returns a list of dicts with keys source/target/source_language
        (DictCursor). FIX: LIMIT values are now bound as query parameters
        instead of interpolated into the SQL string; the old default
        ``size=None`` also produced invalid SQL ('limit 0,None').
        """
        sql = f'SELECT source, target, source_language FROM {self.tables[lang]} limit %s, %s'
        self.cursor.execute(sql, (start, size))
        datas = self.cursor.fetchall()
        return datas

    def classify(self, texts: List[str], lang: str = '') -> List[str]:
        """POST *texts* to the classification service and return the list of
        predicted class names (the service's 'data' field).

        FIX: uses requests' ``json=`` so the Content-Type header is set to
        application/json (the original posted a raw body with no header).
        """
        req = requests.post(self.url, json={
            "texts": texts,
            "lang": lang
        })
        # Fail loudly on HTTP errors instead of crashing later on a
        # missing 'data' key in an error payload.
        req.raise_for_status()
        res = req.json()
        return res['data']

    def process(self, lang: str = '泰语', batch_size: int = 1000, n_batches: int = None):
        """Classify up to *n_batches* batches of *batch_size* sentence pairs
        for *lang* and write them to '<lang>_cls.csv'.

        If *n_batches* is None it is derived from the table row count.
        """
        count = self.get_count(lang)
        # Ceiling division so the final partial batch is not dropped
        # (the original floored with int(count / batch_size)).
        n_batches = n_batches or -(-count // batch_size)
        df_all = []
        for batch_idx in range(n_batches):
            # BUGFIX: the offset must advance by batch_size per batch; the
            # original passed batch_idx itself, so consecutive batches
            # re-read almost entirely overlapping rows.
            datas = self.fetch(lang, batch_idx * batch_size, batch_size)
            if not datas:
                break  # ran past the end of the table
            # Pairs store Chinese on either side; normalize so lang_texts
            # is always the foreign-language column.
            if datas[0]['source_language'] == '中文':
                lang_texts = [item['target'] for item in datas]
                zh_texts = [item['source'] for item in datas]
            else:
                lang_texts = [item['source'] for item in datas]
                zh_texts = [item['target'] for item in datas]
            class_names = self.classify(lang_texts, lang)
            df = pd.DataFrame({
                'text': lang_texts,
                'chinese': zh_texts,
                'class_name': class_names,
            })
            df_all.append(df)
            logger.info(df.shape)
        # Guard: pd.concat raises ValueError on an empty list.
        if df_all:
            pd.concat(df_all, ignore_index=True).to_csv(f'{lang}_cls.csv', index=False)


if __name__ == '__main__':
    # Run the classification pipeline for each supported language.
    classifier = CorpusClassify()
    for language in ('泰语', '马来语', '老挝语', '越南语'):
        classifier.process(lang=language, batch_size=1000, n_batches=100)