# coding: utf-8
import urllib2
import json
from BeautifulSoup import BeautifulStoneSoup
from numpy import array
from numpy.random import standard_normal
from numpy import sqrt
import os
import sys
sys.path.append("..")
import sys
import codecs
sys.stdout = codecs.getwriter('utf8')(sys.stdout)

import numpy, scipy.sparse
from sparsesvd import sparsesvd
from occurrence_matrix_calculator import OccurrenceMatrixConstructor
from tokenizer import Tokenizer
from numpy.random import standard_normal, rand


def print_prob(message, prob):
  """Print *message* with probability *prob*.

  prob=0.0 never prints; prob>=1.0 always prints, since rand() draws
  uniformly from [0, 1).  The parameter was renamed from `str`, which
  shadowed the builtin.
  """
  if rand() < prob:
    print(message)


def canonical_vector(dimension, i):
  """Return a length-`dimension` int array that is 10 at index i, 0 elsewhere.

  NOTE(review): a true canonical basis vector would hold 1, not 10; the
  factor of 10 is preserved as the original's (presumably deliberate) scale.
  """
  entries = [0] * dimension
  entries[i] = 10
  return array(entries)

def generate_canonical_basis(dimension):
  """Return the (scaled) standard basis of R^dimension as a list of arrays."""
  basis = []
  for index in range(dimension):
    basis.append(canonical_vector(dimension, index))
  return basis


def generate_noisy_li_vectors(dimension):
  """Return the scaled canonical basis perturbed by standard Gaussian noise.

  The perturbed vectors stay linearly independent with high probability.
  """
  noisy = []
  for basis_vector in generate_canonical_basis(dimension):
    noisy.append(basis_vector + standard_normal(dimension))
  return noisy

import os

def get_news_from(month, day_no):
  """Fetch (and cache on disk) one day's news JSON for *month*.

  month  -- directory/remote name, e.g. "2011_1"
  day_no -- zero-based day index; day_no=0 fetches day "01"

  Downloads <prefix><month>/<month-without-underscore><DD>.json on first
  use, caches it under ./<month>/, then parses the cached file.

  Returns a list of dicts {'text': article body, 'filename': cache path}.
  """
  day = day_no + 1
  # Zero-pad the day number to two digits.
  number = '0' + str(day) if day < 10 else str(day)
  directory = month
  filename = month + "/" + month.replace("_", '') + number + ".json"
  prefix = "http://calamaro.exp.dc.uba.ar/incc/"

  url = prefix + filename

  if not os.path.exists(directory):
    os.makedirs(directory)
  if not os.path.exists(filename):
    print ("Getting %s" % url)
    # Close the HTTP response explicitly instead of leaking the handle.
    response = urllib2.urlopen(url)
    try:
      raw_news_from_that_day = response.read()
    finally:
      response.close()
    with open(filename, "w+") as f:
      f.write(raw_news_from_that_day)

  with open(filename, "r+") as f:
    raw_news_from_that_day = f.read()

  # The file holds an array of articles; we only need the body ("cuerpo").
  news_from_that_day = json.loads(raw_news_from_that_day)
  return [{'text': n["cuerpo"][0], 'filename': filename}
          for n in news_from_that_day]
  

def get_news():
  """Download/read the cached daily news files and return all articles.

  (Self-admittedly the worst scraping in the universe, but it was easy.)
  """
  months = [{'dir': "2011_1", 'size': 31}, {'dir': "2011_2", 'size': 28},
            {'dir': "2011_3", 'size': 31}, {'dir': "2011_4", 'size': 30},
            {'dir': "2011_5", 'size': 31}, {'dir': "2011_6", 'size': 30}]

  news = []

  for month in months:
    # NOTE(review): only the first 2 days of each month are processed;
    # switch to range(month["size"]) to cover the whole month.
    for day in range(2):
      news.extend(get_news_from(month["dir"], day))
  return news

def dot_prod(u, i, j):
  """Return the dot product of rows i and j of the 2-D array *u*.

  Vectorized with numpy.dot instead of the original per-column Python loop.
  """
  return numpy.dot(u[i], u[j])

def are_similar(u, i, j, cosine_threshold):
  """Return True iff rows i and j of *u* have cosine similarity > threshold.

  NOTE(review): divides by the product of the row norms, so an all-zero
  row yields a division by zero / NaN — callers must avoid zero rows.
  """
  dp = dot_prod(u, i, j)
  u_i_mod = sqrt(dot_prod(u, i, i))
  u_j_mod = sqrt(dot_prod(u, j, j))
  cos = dp / (u_i_mod * u_j_mod)

  # Return the comparison directly instead of if/else returning True/False.
  return cos > cosine_threshold

def find_similar_words(u, tokens, cosine_threshold):
  """Print every pair of tokens whose row vectors in *u* are similar.

  u      -- matrix whose row k is the vector for tokens[k]
  tokens -- token strings aligned with the rows of u
  """
  # Iterate over the word vectors; upper triangle only, so each pair is
  # compared exactly once.
  for i in range(u.shape[0]):
    token1 = tokens[i]
    for j in range(i + 1, u.shape[0]):
      token2 = tokens[j]
      if are_similar(u, i, j, cosine_threshold):
        # Parenthesized print for consistency with the rest of the file
        # (the original bare `print` statement was the only py2-only line).
        print("%s and %s are similar!" % (token1, token2))



def process_news(news):
  """Build the word co-occurrence matrix for *news*, SVD it, and report
  similar word pairs.

  Returns (ut, s, vt, omc): the truncated SVD factors and the matrix
  constructor (which holds the token list).
  """
  omc = OccurrenceMatrixConstructor(news)
  print("Building occurrence matrix")
  mat = omc.get_regularized_matrix()
  tokens = omc.tokens
  print("Converting")
  smat = scipy.sparse.csc_matrix(mat)
  print("SVDing a %d x %d sparse matrix" % smat.shape)
  ut, s, vt = sparsesvd(smat, 300)  # truncated SVD: ask for 300 factors
  u = ut.transpose()  # rows of u are the word vectors
  print("Finding similar words")
  find_similar_words(u, tokens, 0.66)  # reuse u instead of transposing again
  return ut, s, vt, omc

def main():
  """Fetch the news corpus and run the SVD word-similarity pipeline."""
  news = get_news()
  # process_news returns (ut, s, vt, omc); the result is unused here and
  # binding the whole 4-tuple to a name called `omc` was misleading.
  process_news(news)
 
# Run the pipeline only when executed as a script, not on import.
if __name__ == "__main__":
  main()