# -*- coding: UTF-8 -*-
import os
import sys
import argparse
import datetime
import collections

import numpy as np
import tensorflow as tf

# Directory containing the PTB dataset files (ptb.{train,valid,test}.txt).
data_path = "/Users/zyz/project/private/tensorflow/simple-examples/data"

# Directory where the trained model parameters are saved.
save_path = "./save"


parser = argparse.ArgumentParser()
# '--date_path' was a typo in the original flag name; it is kept as an
# alias so existing command lines keep working.  dest='data_path' makes
# the canonical spelling the attribute name.
parser.add_argument('--data_path', '--date_path', type=str, default=data_path,
                    dest='data_path',
                    help='the path of the data for training and testing')
args = parser.parse_args()

# Wire the flag into the module-level path.  Previously the parsed value
# was never read, so the flag silently did nothing; the default is the
# same constant, so default behavior is unchanged.
data_path = args.data_path

# True when running under Python 3 (file contents then need no .decode()).
Py3 = sys.version_info[0] == 3

# Tokenize a file, marking each line break with the <eos> sentence delimiter.
def read_words(filename):
  """Return the whitespace-separated tokens of *filename*.

  Every newline is replaced by the '<eos>' marker before splitting.
  Under Python 2 the raw bytes are decoded as UTF-8 first.
  """
  with tf.gfile.GFile(filename, "r") as f:
    raw_text = f.read()
  if not Py3:
    raw_text = raw_text.decode('utf-8')
  return raw_text.replace('\n', '<eos>').split()

# Build a mapping from each word to a unique integer id.
def build_vocab(filename):
  """Build a word -> integer-id vocabulary from *filename*.

  Words are ranked by descending frequency, ties broken alphabetically,
  so the most frequent word gets id 0.

  Returns:
    dict mapping each distinct word to its unique int id.
  """
  data = read_words(filename)
  counter = collections.Counter(data)
  # Fixed: this was a Python 2 print *statement* (a SyntaxError under
  # Python 3, which this file otherwise supports via the Py3 flag); the
  # single-argument parenthesized form prints identically on both.
  print("======counter=====")
  # Sort by (frequency desc, word asc) so the id assignment is deterministic.
  count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0]))

  words, _ = list(zip(*count_pairs))

  # Each word's rank in the sorted order becomes its id.
  word_to_id = dict(zip(words, range(len(words))))

  return word_to_id

# Encode a file's words as their vocabulary ids.
def file_to_word_ids(filename, word_to_id):
  """Return the id sequence for *filename*; out-of-vocabulary words are skipped."""
  tokens = read_words(filename)
  return [word_to_id[tok] for tok in tokens if tok in word_to_id]

# Load all three PTB splits and convert every word to its integer id.
def load_data():
  """Load the PTB train/valid/test sets as integer-id sequences.

  Returns:
    (train_data, valid_data, test_data, vocab_size, id_to_word)
  """
  # Paths of the three dataset splits under data_path.
  paths = {
      split: os.path.join(data_path, "ptb.%s.txt" % split)
      for split in ("train", "valid", "test")
  }

  # The vocabulary is built from the training split only; words seen
  # only in valid/test are dropped during encoding.
  word_to_id = build_vocab(paths["train"])

  train_data = file_to_word_ids(paths["train"], word_to_id)
  valid_data = file_to_word_ids(paths["valid"], word_to_id)
  test_data = file_to_word_ids(paths["test"], word_to_id)

  # Number of distinct words in the vocabulary.
  vocab_size = len(word_to_id)

  # Inverse mapping, for turning ids back into words later.
  id_to_word = dict(zip(word_to_id.values(), word_to_id.keys()))

  print("============")
  print(train_data[:10])

  return train_data, valid_data, test_data, vocab_size, id_to_word

# Produce queued (input, target) minibatch tensors from a flat id sequence.
def generate_batches(raw_data, batch_size, num_steps):
  """Return a pair of [batch_size, num_steps] int32 tensors (x, y).

  y is x shifted one position to the right, i.e. for an input window
  "w1 w2 ... wn" the target is "w2 w3 ... wn+1".
  """
  raw_data = tf.convert_to_tensor(raw_data, name="raw_data", dtype=tf.int32)

  total_len = tf.size(raw_data)
  batch_len = total_len // batch_size

  # Drop the tail that doesn't fill a whole batch and lay the data out
  # as [batch_size, batch_len].
  data = tf.reshape(raw_data[0: batch_size * batch_len],
                    [batch_size, batch_len])

  # Number of num_steps-wide windows per epoch; the -1 leaves room for
  # the one-step-shifted targets.
  epoch_size = (batch_len - 1) // num_steps

  # range_input_producer feeds window indices asynchronously from a
  # background queue (TF1 input pipeline).
  step = tf.train.range_input_producer(epoch_size, shuffle=False).dequeue()
  offset = step * num_steps

  x = data[:, offset: offset + num_steps]
  x.set_shape([batch_size, num_steps])

  # Targets: the same window shifted one token ahead.
  y = data[:, offset + 1: offset + num_steps + 1]
  y.set_shape([batch_size, num_steps])

  return x, y

# Input data holder: one dataset split plus its queued batch tensors.
class Input():
  """Bundles batching hyper-parameters with the (input, target) tensors.

  NOTE(review): in the original file this class body was indented two
  spaces, so it was defined (unreachably) inside generate_batches after
  its return statement; it is dedented to module level here.  The
  'slef' typo in __init__ is also fixed.
  """
  def __init__(self, batch_size, num_steps, data):
    self.batch_size = batch_size
    self.num_steps = num_steps
    # Windows per epoch; the -1 leaves room for the shifted targets.
    self.epoch_size = ((len(data) // batch_size) - 1) // num_steps
    # input_data is the model input, targets the expected output.
    self.input_data, self.targets = generate_batches(data, batch_size, num_steps)

# Run the data-loading smoke test only when executed as a script.  The
# original guard was commented out (and misspelled '__name'), so
# load_data() ran as a side effect of every import.
if __name__ == "__main__":
  load_data()
