#!/usr/bin/env python
#encoding=utf-8

import os, re, codecs
import jieba


# Input/output locations for the ads keyword-relevance dataset.
data_dir = '../'
dst_dir = './'
file_in_path = os.path.join(data_dir, 'ads.txt')
# Train/valid output files, in the same order as split_scales below.
# (A plain list comprehension replaces the old map/lambda/zip chain and,
# unlike a Py3 map object, can be iterated more than once.)
file_outs_paths = [os.path.join(dst_dir, name)
                   for name in ['ads_train.txt', 'ads_valid.txt']]

test_data_in_path = os.path.join(data_dir, 'ads_test.txt')
test_data_out_path = os.path.join(dst_dir, 'ads_test.txt')

# original data: 97951, 12373
# new data:      110331, 1454
split_scales = [0.9, 0.1]

# Filled in by generateStopwords(); consulted when segmenting text.
set_stopwords = set()

def generateStopwords():
    """Load ../stopwords.txt (one word per line) into the module-level
    set_stopwords set."""
    path = os.path.join(data_dir, 'stopwords.txt')
    with codecs.open(path, 'r', encoding='utf-8') as handle:
        set_stopwords.update(entry.strip() for entry in handle)

def analyseDataFile(data_path):
    """Print quick diagnostics for a raw ads file: how many lines parse as
    <p>...</p> paragraphs versus how many do not."""
    paragraph_re = re.compile(r"<p>(.*?)</p>", re.I)
    ok_count = 0
    fail_count = 0
    tagged = 0
    untagged = 0

    with codecs.open(data_path, encoding='utf8') as handle:
        for raw in handle:
            stripped = raw.strip()

            # A closing tag anywhere on the line counts as "has paragraphs".
            if '</p>' in stripped:
                tagged += 1
            else:
                untagged += 1

            paragraphs = paragraph_re.findall(stripped)
            if paragraphs:
                print(paragraphs)
                ok_count += 1
            else:
                fail_count += 1

    print(ok_count, fail_count)
    print(tagged, untagged)

def strip_comma(s):
    """Return *s* with all leading and trailing commas removed.

    Replaces a hand-rolled two-pass index scan with str.strip(','), which
    also fixes an edge case: the old code returned an all-comma string
    (e.g. ",,,") unchanged because neither scan ever found a non-comma
    character to break on.
    """
    return s.strip(',')

def generateTrainValidData(data_path):
    """Parse the raw ads file and write shuffled train/valid splits.

    Each usable input line looks like:
        <p>...</p><p>...</p>...<tab-separated annotation fields>
    The text after the last '</p>' holds 4 header fields followed by
    groups of 6 fields per keyword:
        keyword, score1, score2, score3, agreement flag, final score.
    Output lines are: segmented-content \t keyword \t binary-label.
    """
    groupInfo_with_keywords = []
    cnt_normal = 0
    cnt_error = 0
    with codecs.open(data_path, encoding='utf8') as file:

        for line in file.readlines():
            line = line.strip()
            items = line.split('</p>')
            if len(items) > 1:
                # The keyword/score annotation block follows the last </p>.
                keywords_str = items[-1]
                keywords_str = strip_comma(keywords_str)
                ks = keywords_str.split('\t')
                if (len(ks) - 4) % 6 != 0:
                    # Malformed annotation: field count doesn't fit the
                    # "4 header + 6 per keyword" layout.
                    cnt_error += 1
                else:
                    cnt_normal += 1
                    ks = ks[4:]  # drop the 4 header fields
                    keywords_marks = []
                    for i in range(int(len(ks) / 6)):
                        keyword = ks[i * 6]
                        # The flag literally reads "the three raters'
                        # scores are unequal"; in that case average the
                        # three scores, otherwise trust the final field.
                        if ks[i * 6 + 4] == u'三人评分不相等':
                            mark = int((int(ks[i * 6 + 1]) + int(ks[i * 6 + 2]) + int(ks[i * 6 + 3])) / 3)
                        else:
                            mark = int(ks[i * 6 + 5])
                        keywords_marks.append((keyword, mark))

                    contents = []
                    ps = re.findall(r"<p>(.*?)</p>", line, re.I)
                    if ps:
                        # NOTE(review): paragraphs are encoded to UTF-8
                        # byte strings here, yet later .replace()/jieba.cut
                        # calls pass str literals — this relies on Python 2
                        # byte-string semantics; confirm before porting to 3.
                        contents.extend([p.encode('utf-8') for p in ps])
                    else:
                        pass
                    # contents = contents[0:30]
                    groupInfo_with_keywords.append((contents, keywords_marks))
            else:
                pass

    import numpy as np

    # NOTE(review): builds an array from ragged (contents, keywords_marks)
    # pairs; recent numpy versions may require an explicit dtype=object
    # for this to keep working — verify against the pinned numpy version.
    dataset = np.array(groupInfo_with_keywords)

    np.random.shuffle(dataset)



    def getSplits(length):
        # Turn the fractional split_scales into [start, end) index pairs;
        # a third (idx2, length) slice exists but no file consumes it.
        idx1 = int(length * split_scales[0])
        idx2 = int(length * (split_scales[0] + split_scales[1]))
        return [(0, idx1), (idx1, idx2), (idx2, length)]

    split_idxs = getSplits(dataset.shape[0])

    for idx, file_path in enumerate(file_outs_paths):
        start, end = split_idxs[idx]
        data = dataset[start:end]
        with codecs.open(file_path, 'w+', encoding='utf-8') as f_out:
            for contents, keywords_marks in data:
                # Strip field labels, segment with jieba, drop stopwords.
                contents_cuted = [
                    " ".join([word for word in jieba.cut(content.replace("广告标题：", "").replace("商品名: ", "").replace("广告描述：", "")) if word not in set_stopwords]) for content
                    in contents]
                # Binarize: score >= 2 means the keyword is relevant.
                keywords_marks2 = [(key, 1 if int(mark) >= 2 else 0) for key, mark in keywords_marks]
                line = ""
                # One output line per keyword, each repeating the full
                # segmented content as the query side.
                for key, mark in keywords_marks2:
                    line += ' '.join(contents_cuted) + '\t' + key + '\t' + str(mark) + '\n'
                f_out.write(line)
            print "Write data to file: ", file_path

    print "Total count of article data is: ", cnt_normal + cnt_error
    print "Error count of article data is: ", cnt_error

def generateTestData(file_in_path, file_out_path):
    with codecs.open(file_in_path, 'r', encoding='utf8') as IN, \
        codecs.open(file_out_path, 'w+', encoding='utf8') as OUT:
        for line in IN:

            info = line.strip().split("\tx\t")

            # ptn = re.compile(u'^广告标题：(.+)$|^广告标题：(.+)<br>广告描述：(.+)$|^广告标题：(.+)<br>商品名：(.+)$|^广告标题：(.+)<br>商品名：(.+)<br>广告描述：(.+)$')
            seg_ptn = re.compile(
                u"！|？|。|；|…|，|、|：|“|”|‘|’|《|》|｛|｝|【|】|（|）|　|[!\?,;\"{}\[\]\(\) ]|(?:(?:(?:(?:mailto|ssh|ftp|https?)://)?(?:[a-z0-9]+(?:\:\d+)?\@)?(?:(?:[a-z0-9](?:[-a-z0-9]*[a-z0-9])?\.)+(?:com|net|edu|biz|gov|org|in(?:t|fo)|(?:[a-z][a-z]))|(?:[01]?\d\d?|2[0-4]\d|25[0-5])\.(?:[01]?\d\d?|2[0-4]\d|25[0-5])\.(?:[01]?\d\d?|2[0-4]\d|25[0-5])\.(?:[01]?\d\d?|2[0-4]\d|25[0-5])))(?:\:(?:\d{1,5}))?(?:\/[ -~]*)*)")

            texts2 = []
            texts = info[0].split('<br>')
            for text in texts:
                text = text.replace(u'广告标题：', '').replace(u'广告描述：', '').replace(u'商品名：', '')
                texts2 += [splitedLine for splitedLine in seg_ptn.split(text) if len(splitedLine) != 0]

            kw_marks = [item.split(":") for item in info[1].split(';')]

            query = ' '.join([' '.join([word for word in jieba.cut(text) if word not in set_stopwords]) for text in texts2])
            docs = kw_marks
            line = ""
            for doc in docs:
                line += query + '\t' + doc[0].strip() + '\t' + ('1' if int(doc[1].strip()) >= 2 else '0') + '\n'

            OUT.write(line)
    print "Write data to file: " + file_out_path

def _main():
    """Run the full pipeline: load stopwords, then write the train/valid
    splits and the test set."""
    generateStopwords()
    generateTrainValidData(file_in_path)
    generateTestData(test_data_in_path, test_data_out_path)

if __name__ == "__main__":
    _main()

