#coding: UTF-8

'''
Data file helpers: CSV loading, lemmatization and mini-batch iteration.
'''

import sys
import csv
import nltk
# Module-level NLTK singletons shared by the helpers below:
# WordNet lemmatizer used by stem(), word/punctuation tokenizer (currently
# only referenced from commented-out code in stem_line).
nstem = nltk.stem.WordNetLemmatizer()
ntoken = nltk.tokenize.WordPunctTokenizer()


def load_csv(fname):
    """Yield each row of the CSV file *fname* as a dict keyed by header.

    The file is opened with a context manager so the handle is closed
    when iteration finishes (the original leaked the open file).
    """
    with open(fname) as f:
        for row in csv.DictReader(f):
            yield row

def stem(word):
    """Return the WordNet lemma of *word*, or *word* itself on failure.

    The fallback is deliberate best-effort: a token the lemmatizer cannot
    handle is passed through unchanged. Catch only Exception so that
    KeyboardInterrupt/SystemExit are not swallowed (the original used a
    bare except).
    """
    try:
        return nstem.lemmatize(word)
    except Exception:
        return word

def stem_line(sent):
    """Decode *sent* as UTF-8, lemmatize each space-separated token,
    and return the tokens rejoined with single spaces."""
    return ' '.join(stem(tok) for tok in sent.decode("UTF-8").split(" "))

def load_train_data_label(fname):
    """Load a labelled question-pair CSV.

    Each question is lower-cased, stripped of trailing/leading '?',
    lemmatized via stem_line, and re-encoded as UTF-8 bytes.

    Returns:
        (questions1, questions2, labels) as three parallel lists.
    """
    questions1, questions2, labels = [], [], []

    for row in load_csv(fname):
        first = stem_line(row["question1"].lower().strip("?")).encode("UTF-8")
        second = stem_line(row["question2"].lower().strip("?")).encode("UTF-8")

        questions1.append(first)
        questions2.append(second)
        labels.append(row["is_duplicate"])

    return questions1, questions2, labels


def load_test_data(fname):
    """Load an unlabelled question-pair CSV.

    Questions get the same normalization as in load_train_data_label
    (lower-case, strip '?', lemmatize, UTF-8 encode).

    Returns:
        (questions1, questions2, test_ids) as three parallel lists.
    """
    questions1, questions2, ids = [], [], []

    for row in load_csv(fname):
        questions1.append(
            stem_line(row["question1"].lower().strip("?")).encode("UTF-8"))
        questions2.append(
            stem_line(row["question2"].lower().strip("?")).encode("UTF-8"))
        ids.append(row["test_id"])

    return questions1, questions2, ids


#input string 
#output vector of term
#input string
#output vector of term
def decode_sent(sent):
    """Split *sent* on single spaces and return the non-empty tokens,
    each stripped of surrounding whitespace.

    Bug fix: the original never initialized `res` (NameError on every
    call) and assigned `i = []` only to shadow it with the loop variable.
    """
    res = []
    for tok in sent.split(" "):
        tok = tok.strip()
        if tok:
            res.append(tok)
    return res

#def build_vocab()


import numpy as np

def batch_iter(data, batch_size, num_epochs, shuffle=True):
    """Yield successive mini-batches over *data* for *num_epochs* epochs.

    *data* is converted to a numpy array once. When *shuffle* is true the
    row order is re-permuted independently at the start of every epoch.
    The final batch of an epoch may be smaller than *batch_size*.
    """
    data = np.array(data)
    n = len(data)
    batches_per_epoch = int((n - 1) / batch_size) + 1

    for _ in range(num_epochs):
        epoch_data = data[np.random.permutation(n)] if shuffle else data

        for b in range(batches_per_epoch):
            lo = b * batch_size
            hi = min(lo + batch_size, n)
            yield epoch_data[lo:hi]



if __name__ == "__main__":
    # Smoke test: dump every parsed row of the training CSV to stdout.
    # NOTE: Python 2 print statement — this file is not Python 3 compatible.
    for sp in load_csv("../data/train.csv"):
        print sp