# coding=utf8

from __future__ import print_function

import sys
import io
import numpy as np
import csv
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
import zlib

import pickle
from collections import OrderedDict

# Python 2 only: force utf-8 as the implicit str<->unicode codec so mixed
# encodings in the tweet data don't raise during string coercion.  reload()
# is not a builtin on Python 3 (and setdefaultencoding was removed), so
# guard the call to keep the module importable on both versions.
try:
    reload(sys)
    sys.setdefaultencoding('utf8')
except NameError:
    pass

# Base directory holding the Sentiment140 CSV files.
datdir = '/Users/vista/PycharmProjects/data/movie_comments/'

org_train_file = datdir + 'training.1600000.processed.noemoticon.csv'
org_test_file = datdir + 'testdata.manual.2009.06.14.csv'

# 0 – the polarity of the tweet (0 = negative, 2 = neutral, 4 = positive)
# 1 – the id of the tweet (2087)
# 2 – the date of the tweet (Sat May 16 23:58:44 UTC 2009)
# 3 – the query (lyx). If there is no query, then this value is NO_QUERY.
# 4 – the user that tweeted (robotickilldozr)
# 5 – the text of the tweet (Lyx is cool)

# Extract the useful field (the tweet text) from the raw CSV and build a lexicon.
def create_lexicon(org_file):
    """Build a vocabulary list from the tweet column of *org_file*.

    Each tweet (last CSV column) is lower-cased, tokenized and lemmatized;
    word occurrences are counted over the whole file, and only words whose
    count lies strictly between 100 and 100000 are kept — filtering out
    both rare noise/typos and ubiquitous stop-words.

    Returns:
        list of retained words, ordered by ascending frequency.
    """
    lex = []
    lemmatizer = WordNetLemmatizer()
    count_word = {}  # word -> number of occurrences
    okline = 0       # rows processed so far (progress counter)

    with io.open(org_file, buffering=10000, encoding='latin-1') as f:
        # Row layout: "4","2193601966","Tue Jun 16 ...","NO_QUERY","user","tweet text"
        for line in csv.reader(f):
            okline += 1
            try:
                tweet = line[-1]  # last column holds the tweet text
                for word in word_tokenize(tweet.lower()):
                    word = lemmatizer.lemmatize(word)
                    count_word[word] = count_word.get(word, 0) + 1

                if okline % 10000 == 0:
                    print(okline)  # progress indicator
            except Exception as e:
                # Best effort: report the malformed row and keep going.
                print(e)
                continue

    print(okline)

    # Sort by frequency so the lexicon order is deterministic, then keep
    # only mid-frequency words.
    for word, cnt in sorted(count_word.items(), key=lambda t: t[1]):
        if 100 < cnt < 100000:
            lex.append(word)

    return lex


def conv_dataset_vector(test_file, lex0):
    """Convert a sentiment CSV into dense one-hot bag-of-words vectors.

    Args:
        test_file: path to a CSV in the Sentiment140 layout (polarity in
            column 0, tweet text in the last column).
        lex0: vocabulary list; feature position i corresponds to lex0[i].

    Returns:
        (features, labels): features is a list of 0/1 lists of length
        len(lex0); labels is a list of one-hot 3-lists.
    """
    # Precompute word -> index once.  Using `word in lex0` plus
    # lex0.index(word) inside the loop is O(len(lex0)) per word and made
    # the whole conversion quadratic.
    word_index = {word: idx for idx, word in enumerate(lex0)}

    features = []
    labels = []
    lemmatizer = WordNetLemmatizer()
    linenum = 0

    with io.open(test_file, encoding='latin-1') as f:
        for line in csv.reader(f):
            linenum += 1
            try:
                label = []
                if line[0] == '0':
                    label = [0, 0, 1]  # negative tweet
                elif line[0] == '2':
                    label = [0, 1, 0]  # neutral tweet
                elif line[0] == '4':
                    label = [1, 0, 0]  # positive tweet

                tweet = line[-1]
                words = [lemmatizer.lemmatize(w) for w in word_tokenize(tweet.lower())]

                feature = np.zeros(len(lex0))
                for word in words:
                    idx = word_index.get(word)
                    if idx is not None:
                        feature[idx] = 1

                features.append(list(feature))
                labels.append(label)
            except Exception as e:
                # Best effort: report the malformed row and keep going.
                print(e)
            if linenum % 10000 == 0:
                print(linenum)  # progress indicator
    return features, labels


def conv_dataset_vector_2(test_file, lexdict):
    """Convert a sentiment CSV into sparse [index_list, label] pairs.

    Args:
        test_file: path to a CSV in the Sentiment140 layout.
        lexdict: mapping word -> lexicon index.

    Returns:
        list of [index_list, label] pairs, where index_list holds the
        lexicon indices of the tweet's words; rows whose tweet contains
        no lexicon word are dropped.
    """
    features = []
    lemmatizer = WordNetLemmatizer()
    linenum = 0

    with io.open(test_file, encoding='latin-1') as f:
        for line in csv.reader(f):
            linenum += 1
            try:
                label = []
                if line[0] == '0':
                    label = [0, 0, 1]  # negative tweet
                elif line[0] == '2':
                    label = [0, 1, 0]  # neutral tweet
                elif line[0] == '4':
                    label = [1, 0, 0]  # positive tweet

                tweet = line[-1]
                words = [lemmatizer.lemmatize(w) for w in word_tokenize(tweet.lower())]

                # dict.has_key() was removed in Python 3; the `in` operator
                # is equivalent and works on both 2 and 3.
                feature = [lexdict[w] for w in words if w in lexdict]

                # Skip tweets with no in-lexicon words.
                if feature:
                    features.append([feature, label])
            except Exception as e:
                # Best effort: report the malformed row and keep going.
                print(e)
            if linenum % 10000 == 0:
                print(linenum)  # progress indicator
    return features


def save_lex():
    """Build the lexicon from the training file and pickle it to disk.

    Builds first, then opens the output, so a failure during lexicon
    construction cannot leave behind a truncated pickle; `with` closes
    the file (the original also called f.close() redundantly).
    """
    lex = create_lexicon(org_train_file)
    with io.open(datdir + 'lexcion.pickle', 'wb') as f:
        pickle.dump(lex, f)

def getlex():
    """Load and return the pickled lexicon written by save_lex().

    Uses `with` so the file is closed even if unpickling raises.
    """
    with io.open(datdir + 'lexcion.pickle', 'rb') as f:
        return pickle.load(f)

def _dump_sparse_vectors(src_file, out_path, lexdict):
    """Vectorize *src_file* with *lexdict* and pickle the array to *out_path*."""
    fea = conv_dataset_vector_2(src_file, lexdict)
    # Rows are ragged ([index_list, label] pairs of varying length), so an
    # object array is required; modern NumPy refuses ragged input without
    # an explicit dtype=object.
    npfea = np.array(fea, dtype=object)
    with io.open(out_path, 'wb') as f:
        pickle.dump(npfea, f)


def save_vec():
    """Vectorize the train and test CSVs with the saved lexicon and pickle them."""
    lex = getlex()

    # word -> lexicon index
    lexdict = {word: idx for idx, word in enumerate(lex)}

    _dump_sparse_vectors(org_train_file, datdir + 'les2.train.pickle', lexdict)
    _dump_sparse_vectors(org_test_file, datdir + 'les2.test.pickle', lexdict)


def zip_vec():
    """zlib-compress the pickled test vectors into a .zlib sibling file.

    The original opened the output file without `with`, leaking the handle
    if write() raised; both files are now managed by context managers.
    """
    with io.open(datdir + 'les2.test.pickle', 'rb') as f:
        data = f.read()

    dat = zlib.compress(data, zlib.Z_BEST_COMPRESSION)

    with io.open(datdir + 'les2.test.pickle.zlib', 'wb') as ff:
        ff.write(dat)


def load_zip_vec():
    """Decompress and unpickle the zlib-compressed test vectors.

    Returns:
        (features, labels) pulled from a dict-shaped pickle.

    NOTE(review): this expects the pickle to hold a dict with 'features'
    and 'labels' keys, but save_vec()/zip_vec() in this file write a plain
    array of [index_list, label] pairs — verify which producer this loader
    is paired with before relying on it.
    """
    with io.open(datdir + 'les2.test.pickle.zlib', 'rb') as ff:
        data = zlib.decompress(ff.read())

    # Security: pickle can execute arbitrary code — only ever feed it
    # trusted, locally-written files like this one.
    dat = pickle.loads(data)

    return dat['features'], dat['labels']


class DataSet(object):
    """Holds sparse feature rows and serves shuffled mini-batches.

    ``features`` is an indexable, fancy-index-shuffleable sequence (e.g. a
    NumPy object array as written by save_vec) of [index_list, label]
    pairs; ``shape`` is the lexicon size, i.e. the width of the dense 0/1
    vectors that get expanded on demand.
    """
    def __init__(self, features, shape):

        # Number of rows; assumes features has a NumPy-style .shape.
        self._num_examples = features.shape[0]

        self._features = features
        self._epochs_completed = 0
        self._index_in_epoch = 0  # cursor into the current epoch

        self._shape = shape

    @property
    def shape(self):
        """Width of the dense feature vectors (lexicon size)."""
        return self._shape

    def get_x_labels(self):
        """Densify ALL examples at once into self._x and self._labels.

        Expands each row's sparse index list into a 0/1 vector of width
        _shape and stacks the labels; read the results afterwards via the
        .x and .labels properties.  Memory cost is num_examples * shape.
        """

        feavet = np.zeros([self._num_examples,self._shape])
        for row in range(self._num_examples):
            # _features[row][0] is the list of active lexicon indices.
            for i in range(len(self._features[row][0])):
                feavet[row][self._features[row][0][i]] = 1
        self._x = feavet

        # _features[row][1] is the one-hot label.
        self._labels = np.array([x[1] for x in self._features[:]])


    @property
    def x(self):
        """Dense feature matrix; only valid after get_x_labels()."""
        return self._x

    @property
    def labels(self):
        """Label matrix; only valid after get_x_labels()."""
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed

    def set_index(self,v):
        # Manually reposition the epoch cursor (e.g. reset to 0).
        self._index_in_epoch = v

    def next_batch(self, batch_size):
        """Return the next `batch_size` examples from this data set."""
        start = self._index_in_epoch

        # Shuffle once at the very start of an epoch.
        if(start == 0):
            perm = np.arange(self._num_examples)
            np.random.shuffle(perm)
            self._features = self._features[perm]

        self._index_in_epoch += batch_size
        if self._index_in_epoch > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Shuffle the data
            perm = np.arange(self._num_examples)
            np.random.shuffle(perm)
            self._features = self._features[perm]
            # Start next epoch
            # NOTE(review): tail examples that don't fill a whole batch are
            # dropped, and when start was 0 this path shuffles twice in one
            # call — confirm both are intended.
            start = 0
            self._index_in_epoch = batch_size
            assert batch_size <= self._num_examples
        end = self._index_in_epoch
        # Densify only this batch: expand each row's active indices into a
        # 0/1 vector of width _shape.
        feavet = np.zeros([batch_size,self._shape])
        for row in range(batch_size):
            for i in range(len(self._features[start+row][0])):
                feavet[row][self._features[start+row][0][i]] = 1

        return feavet, np.array([x[1] for x in self._features[start:end]])


def load_vec(shape):
    """Load the pickled train/test vectors and wrap them in DataSet objects.

    Args:
        shape: lexicon size (width of the dense one-hot vectors).

    Returns:
        an object exposing .train and .test DataSet attributes.
    """
    # `with` already closes the files; the original also called f.close()
    # redundantly inside each block.
    with io.open(datdir + 'les2.train.pickle', 'rb') as f:
        train_dat = pickle.load(f)

    with io.open(datdir + 'les2.test.pickle', 'rb') as f:
        test_dat = pickle.load(f)

    # Simple namespace bundling the two datasets.
    class DataSets(object):
        pass

    data_sets = DataSets()
    data_sets.train = DataSet(train_dat, shape)
    data_sets.test = DataSet(test_dat, shape)

    return data_sets