#!/usr/bin/env python
#encoding=utf-8

import os, re, codecs
import jieba


# Input/output locations.
data_dir = '../'
dst_dir = './'
file_in_path = os.path.join(data_dir, 'part-00000')
# Materialize as a list: on Python 3 a bare map() is a one-shot iterator and
# would be silently exhausted after the first pass over the output paths.
file_outs_paths = [os.path.join(dst_dir, name)
                   for name in ('sim_question_train.txt', 'sim_question_valid.txt', 'sim_question_test.txt')]

# original data: 97951, 12373
# new data:      110331, 1454
split_scales = [0.8, 0.1, 0.1]  # train/valid/test fractions; should sum to 1.0

set_stopwords = set()  # filled in by generateStopwords()

split_word = u'@@@@@'  # field delimiter for both parsing input and writing output

def generateStopwords():
    """Load stop words from '<data_dir>/stopwords.txt' into the module-level
    set_stopwords set, one stripped word per line."""
    stopwords_path = os.path.join(data_dir, 'stopwords.txt')
    with codecs.open(stopwords_path, 'r', encoding='utf-8') as f:
        set_stopwords.update(word.strip() for word in f)

def analyseDataFile(data_path):
    with codecs.open(data_path, encoding='utf8') as file:
        cnt_normal = 0
        cnt_error = 0

        cnt_has_p = 0
        cnt_without_p = 0
        for line in file.readlines():
            items = line.strip().split('</p>')
            if len(items) > 1:
                cnt_has_p += 1
            else:
                cnt_without_p += 1

            ps = re.findall(r"<p>(.*?)</p>", line.strip(), re.I)
            if ps:
                # print(line.strip())
                print(ps)
                cnt_normal += 1
            else:
                cnt_error += 1

        print(cnt_normal, cnt_error)
        print(cnt_has_p, cnt_without_p)

def strip_comma(s):
    """Return *s* with all leading and trailing commas removed.

    Uses str.strip(',') instead of the hand-rolled index scan. This also fixes
    a bug in the original: for a string consisting entirely of commas (e.g.
    ",,,") neither scan loop ever hit a non-comma character, so the initial
    start/end values spanned the whole string and it was returned unchanged
    instead of reduced to "".
    """
    return s.strip(',')

def generateTrainValidData(data_path):
    """Build word-segmented train/valid/test files for question similarity.

    Each input line is expected to carry at least four split_word-delimited
    fields; items[1] is the grouping key (question text), items[2] a keyword
    and items[3] its mark/label. Groups are shuffled and split according to
    split_scales into the three files of file_outs_paths, one line per
    (question, keyword, mark) triple:
        <segmented question>@@@@@<segmented keyword>@@@@@<mark>

    Args:
        data_path: path to the UTF-8 raw data file.
    """
    groupInfo_with_keywords = {}
    cnt_normal = 0
    cnt_error = 0
    with codecs.open(data_path, encoding='utf8') as f_in:
        for line in f_in:
            # Use the shared delimiter constant instead of a duplicated literal.
            items = line.strip().split(split_word)
            if len(items) < 4:
                # Malformed line: count it instead of crashing with IndexError,
                # and make the error counter below report real statistics (the
                # original never incremented cnt_normal/cnt_error at all).
                cnt_error += 1
                continue
            cnt_normal += 1
            groupInfo_with_keywords.setdefault(items[1], []).append([items[2], items[3]])

    print (len(groupInfo_with_keywords))
    # Shuffle with the stdlib. Building a numpy array from these ragged
    # ([key], pairs) tuples requires dtype=object and raises ValueError on
    # modern numpy versions — and numpy was only used for the shuffle anyway.
    import random
    dataset = [([key], value) for key, value in groupInfo_with_keywords.items()]
    print ("==================")
    print (dataset[0:2])

    random.shuffle(dataset)

    def getSplits(length):
        # Cumulative split indices for the train/valid/test fractions.
        idx1 = int(length * split_scales[0])
        idx2 = int(length * (split_scales[0] + split_scales[1]))
        return [(0, idx1), (idx1, idx2), (idx2, length)]

    split_idxs = getSplits(len(dataset))

    for idx, file_path in enumerate(file_outs_paths):
        start, end = split_idxs[idx]
        data = dataset[start:end]
        with codecs.open(file_path, 'w+', encoding='utf-8') as f_out:
            for contents, keywords_marks in data:
                # Segment each question once and reuse it for every keyword.
                contents_cuted = [' '.join(jieba.cut(content)) for content in contents]
                line = u""
                for key, mark in keywords_marks:
                    words = ' '.join(jieba.cut(key))
                    line += u' '.join(contents_cuted) + split_word + words + split_word + str(mark) + u'\n'
                f_out.write(line)
            print ("Write data to file: ", file_path)

    print ("Total count of article data is: ", cnt_normal + cnt_error)
    print ("Error count of article data is: ", cnt_error)


if __name__ == "__main__":
    # Load the stop-word set first (not referenced by the visible code below —
    # presumably consumed elsewhere; TODO confirm), then build the
    # train/valid/test splits from the raw part-00000 file.
    generateStopwords()
    generateTrainValidData(file_in_path)

