#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@create Time:2019-01-10

@author:Brook
"""
import os
import re
import random
import json
import math

import numpy as np
from sklearn.externals import joblib

from .base import Corpus
from ..util import timing
from ..seg import cut_text
from ..settings import CLF_MODEL_PATH, CORPUS_JSON_PATH, CORPUS_PATH 



def load_xhj():
    """Load the Xiaohuangji corpus from the raw text file.

    Each dialog in the file has the form ``M <ask>\\nM <ans>`` and dialogs
    are separated by lines containing ``E``.

    Returns
    -------
    list of (tuple, str)
        Each item is ``(segmented question tokens, answer string)``.
    """
    # Raw string: the original non-raw pattern relied on Python passing
    # unknown escapes like "\s" through, which is a SyntaxWarning on
    # modern Python and will eventually become an error.
    p = re.compile(r"^M\s*(?P<ask>[\s\S]*?)\nM\s*(?P<ans>[\s\S]*)")
    data = []
    with open(CORPUS_PATH, encoding='utf-8') as f:
        text = f.read()
        for line in text.split("\nE\n"):
            m = p.search(line)
            if m:
                ask = tuple(cut_text(m.group("ask")))  # tokenize the question
                ans = m.group("ans")
                data.append((ask, ans))
    return data


def load_xhj_json():
    """Load the pre-processed Xiaohuangji corpus from its JSON dump.

    Returns
    -------
    (questions, answers)
        The two sequences stored in the JSON file, in that order.
    """
    # Explicit encoding: the corpus is UTF-8 Chinese text; the platform
    # default (e.g. GBK on Windows) could fail to decode it. This also
    # matches the open() call in load_xhj().
    with open(CORPUS_JSON_PATH, encoding='utf-8') as f:
        data = json.load(f)
    q, a = data
    return q, a


class XhjCorpus(Corpus):
    """Xiaohuangji ("little yellow chicken") corpus.

    Thin wrapper that loads the question/answer pairs from the JSON dump
    and hands them to the ``Corpus`` base class.
    """

    def __init__(self):
        questions, answers = load_xhj_json()
        super().__init__(questions, answers)


class XhjClusterCorpus(Corpus):
    """Xiaohuangji corpus with a clustering front-end.

    The questions are clustered up front.  A new question is first
    assigned to a cluster, and similarity is then computed only against
    questions belonging to the same cluster.
    """

    def __init__(self, wordvec):
        """
        Args
        ----
        wordvec : WordVec instance, used to convert text into vectors.
        """
        self._clf = joblib.load(CLF_MODEL_PATH)  # pre-trained clustering model
        self._wordvec = wordvec

        q, a = load_xhj_json()
        super().__init__(q, a)
        # Some machines have little memory, so cluster in batches rather
        # than all at once.
        labels = self._cluster(q)
        # Group question indices by cluster label in a single O(n) pass
        # (the original rescanned the whole label list once per distinct
        # label, which is O(k*n)).
        self._questions_category = {}
        for i, label in enumerate(labels):
            self._questions_category.setdefault(label, []).append(i)

    def _cluster(self, questions, batch_size=1000):
        """Predict a cluster label for every question, in batches.

        Args
        ----
        questions : sequence of questions to label.
        batch_size : number of questions vectorized/predicted per step.

        Returns
        -------
        list of predicted cluster labels, aligned with ``questions``.
        """
        batch = math.ceil(len(questions) / batch_size)
        preds = []
        for i in range(batch):
            begin = i * batch_size
            stop = begin + batch_size
            pred = self._clf.predict(self._wordvec.docs2vec(questions[begin:stop]))
            preds.extend(pred.tolist())
        return preds

    def take_samples(self, N, question):
        """Override take_samples: sample only within the question's cluster.

        Finds the cluster that ``question`` belongs to, then draws at most
        ``N`` question indices from that cluster without replacement.
        """
        feat = self._wordvec.docs2vec([question])
        label = self._clf.predict(feat)[0]
        sub_questions_index = self._questions_category[label]
        samples_index = random.sample(sub_questions_index, min(N, len(sub_questions_index)))
        return samples_index

