#!/usr/bin/python

import json
import sys
import numpy as np
import matplotlib.pyplot as plt
import collections
import multiprocessing
import pickle
from itertools import groupby
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import RegexpTokenizer
from nltk.corpus import stopwords
import re
from time import sleep

# Module-level table of valid label ids consulted by getFirstLabel();
# each Label_* helper reloads it from the matching ../validLabels_*.txt
# file before use, so None here only means "not loaded yet".
validLabels = None

def main():
	"""Entry point: run the Label_Path extraction pass over the raw dump.

	Other passes can be run by handing the corresponding processor to
	extractData instead: PER_utf8, PER_ascii, PER_ascii_stem,
	PER_ascii_stem_stopword, All_Label, All_Label_Leaves.
	"""
	extractData(Label_Path)
	

def extractData(proc):

	global LabelDict	
	LabelDict = dict()
	for l in range(1,10):
		LabelDict[l] = np.loadtxt('../validLabels_L'+str(l)+'.txt',dtype=int)

	rawData = []
	
	with open('../../../Data229/amazon_products') as f:
		
		print 'Reading Data...'
		rawData = f.read().splitlines()

	proc(rawData)

def Label_Path(rawData):

	validLabels = np.loadtxt('../validLabels.txt',dtype=int)
	
	dataSize = len(rawData)
	labels = [[] for i in range(dataSize)]

	for idx in range(dataSize):

		j = json.loads(rawData[idx].strip('\x01\n'))
		if 'BrowseNodes' not in j['Item']:
			pass
		elif 'Ancestors' in j['Item']['BrowseNodes']['BrowseNode']:
			nextLabel = []
			curr = j['Item']['BrowseNodes']['BrowseNode']
			while 'BrowseNodeId' in curr:
				if ('IsCategoryRoot' in curr) and ('Name' in curr):
					if int(curr['Ancestors']['BrowseNode']['BrowseNodeId']) not in validLabels:
						break
					nextLabel.append(int(curr['Ancestors']['BrowseNode']['BrowseNodeId']))
					nextLabel.reverse()
					labels[idx].append(nextLabel)
					break
				else:
					if(int(curr['BrowseNodeId']) not in validLabels):
						break
					nextLabel.append(int(curr['BrowseNodeId']))

				if 'Ancestors' in curr and 'BrowseNode' in curr['Ancestors']:
					curr = curr['Ancestors']['BrowseNode']
				else:
					break
		else:
			for i in j['Item']['BrowseNodes']['BrowseNode']:
				nextLabel = []
				curr = i
				while 'BrowseNodeId' in curr:
					if ('IsCategoryRoot' in curr) and ('Name' in curr): # and (curr['Name']=='Categories'):
						if int(curr['Ancestors']['BrowseNode']['BrowseNodeId']) not in validLabels:
							break
						nextLabel.append(int(curr['Ancestors']['BrowseNode']['BrowseNodeId']))
						nextLabel.reverse()
						labels[idx].append(nextLabel)
						break
					else:
						if(int(curr['BrowseNodeId']) not in validLabels):
							break
						nextLabel.append(int(curr['BrowseNodeId']))
					if 'Ancestors' in curr and 'BrowseNode' in curr['Ancestors']:
						curr = curr['Ancestors']['BrowseNode']
					else:
						break
		if len(labels[idx]) == 0:
			labels[idx].append([0])
		print idx + 1
		#print labels[idx]
		#raw_input('--------------Enter...')
	
	with open('../Label_Path.pkl', 'wb') as outfile:
		pickle.dump(labels, outfile)

def PER_utf8(line):
	"""Return all pruned editorial review texts of one raw JSON line,
	concatenated (each prefixed by a space) and strictly UTF-8 encoded."""
	item = json.loads(line)['Item']
	pieces = [" " + review['Content'] for review in item['PrunedEditorialReviews']]
	return "".join(pieces).encode(encoding='utf8', errors='strict')

def PER_ascii(line):
	"""Return all pruned editorial review texts of one raw JSON line,
	concatenated (each prefixed by a space); non-ASCII chars are dropped."""
	item = json.loads(line)['Item']
	pieces = [" " + review['Content'] for review in item['PrunedEditorialReviews']]
	return "".join(pieces).encode(encoding='ascii', errors='ignore')

def PER_ascii_stem(line):
	"""Concatenated review text of one raw JSON line, ASCII-only,
	with every \\w+ token lemmatized via WordNet."""
	item = json.loads(line)['Item']
	text = ""
	for review in item['PrunedEditorialReviews']:
		text = text + " " + review['Content']
	text = text.encode(encoding='ascii', errors='ignore')

	lemmatizer = WordNetLemmatizer()
	tokens = RegexpTokenizer(r'\w+').tokenize(text)
	return ' '.join(lemmatizer.lemmatize(token) for token in tokens)

def PER_ascii_stem_stopword(line):
	"""Concatenated review text of one raw JSON line, ASCII-only,
	lemmatized, with English stopwords removed.

	Fix: the original called stopwords.words('english') — which builds a
	fresh LIST — inside the per-token loop and scanned it linearly; the
	stopword set is now built once before the loop.
	"""
	j = json.loads(line)
	feature = ""
	for i in j['Item']['PrunedEditorialReviews']:
		feature = feature+" "+i['Content']

	feature = feature.encode(encoding='ascii',errors='ignore')

	stemer = WordNetLemmatizer()
	tokenizer = RegexpTokenizer(r'[a-zA-Z]+')
	# Loop-invariant: load the corpus once and use set membership (O(1)).
	stopset = set(stopwords.words('english'))

	tmp = []
	for word in tokenizer.tokenize(feature):
		w = stemer.lemmatize(word)
		if w not in stopset:
			tmp.append(w)
	feature = ' '.join(tmp)

	return feature

def Label_L1_last(line):
	"""Return the LAST matching level-1 label over a product's browse
	nodes, or 0 when the item has none; reloads ../validLabels_L1.txt
	into the module-level validLabels table."""
	j = json.loads(line)

	global validLabels
	validLabels = np.loadtxt('../validLabels_L1.txt', dtype=int)

	item = j['Item']
	if 'BrowseNodes' not in item:
		return 0

	node = item['BrowseNodes']['BrowseNode']
	if 'Ancestors' in node:
		return getFirstLabel(node)

	label = 0
	for candidate in node:
		found = getFirstLabel(candidate)
		if found != 0:
			label = found
	return label
	
def Label_L2_last(line):
	"""Return the LAST matching level-2 label over a product's browse
	nodes, or 0 when the item has none; reloads ../validLabels_L2.txt
	into the module-level validLabels table."""
	j = json.loads(line)

	global validLabels
	validLabels = np.loadtxt('../validLabels_L2.txt', dtype=int)

	item = j['Item']
	if 'BrowseNodes' not in item:
		return 0

	node = item['BrowseNodes']['BrowseNode']
	if 'Ancestors' in node:
		return getFirstLabel(node)

	label = 0
	for candidate in node:
		found = getFirstLabel(candidate)
		if found != 0:
			label = found
	return label

def Label_L1_first(line):
	"""Return the FIRST matching level-1 label over a product's browse
	nodes, or 0 when the item has none; reloads ../validLabels_L1.txt
	into the module-level validLabels table."""
	j = json.loads(line)

	global validLabels
	validLabels = np.loadtxt('../validLabels_L1.txt', dtype=int)

	item = j['Item']
	if 'BrowseNodes' not in item:
		return 0

	node = item['BrowseNodes']['BrowseNode']
	if 'Ancestors' in node:
		return getFirstLabel(node)

	# Multiple nodes: stop at the first non-zero hit.
	for candidate in node:
		found = getFirstLabel(candidate)
		if found != 0:
			return found
	return 0
	
def Label_L2_first(line):
	"""Return the FIRST matching level-2 label over a product's browse
	nodes, or 0 when the item has none; reloads ../validLabels_L2.txt
	into the module-level validLabels table."""
	j = json.loads(line)

	global validLabels
	validLabels = np.loadtxt('../validLabels_L2.txt', dtype=int)

	item = j['Item']
	if 'BrowseNodes' not in item:
		return 0

	node = item['BrowseNodes']['BrowseNode']
	if 'Ancestors' in node:
		return getFirstLabel(node)

	# Multiple nodes: stop at the first non-zero hit.
	for candidate in node:
		found = getFirstLabel(candidate)
		if found != 0:
			return found
	return 0

def Label_L1_all(line):
	"""Return the deduplicated list of ALL matching level-1 labels for a
	raw JSON line ([0] when nothing matches); reloads
	../validLabels_L1.txt into the module-level validLabels table."""
	j = json.loads(line)

	global validLabels
	validLabels = np.loadtxt('../validLabels_L1.txt', dtype=int)

	found = []
	item = j['Item']
	if 'BrowseNodes' not in item:
		found.append(0)
	elif 'Ancestors' in item['BrowseNodes']['BrowseNode']:
		found.append(getFirstLabel(item['BrowseNodes']['BrowseNode']))
	else:
		for node in item['BrowseNodes']['BrowseNode']:
			label = getFirstLabel(node)
			if label != 0:
				found.append(label)

	if not found:
		found.append(0)

	return list(set(found))
	
def Label_Leaves_thread(idx, rawData, labels):
	"""Worker: store in labels[idx] the leaf label of rawData[idx],
	i.e. a BrowseNodeId found in LabelDict, or 0 if none matches.

	BUG FIX: the original ended with an unconditional `labels[idx] = 0`,
	which clobbered every result computed above, so the worker always
	stored 0. The default is now assigned up front instead.

	NOTE(review): iterating l from 9 down to 1 means assignments at
	SHALLOWER levels overwrite deeper ones (last write wins) — preserved
	as-is; confirm the intended priority.
	NOTE(review): for the parent process to see the result, `labels` must
	be a shared structure (e.g. multiprocessing.Manager().list()); a
	plain list mutated in a child process is invisible to the parent.
	"""
	labels[idx] = 0
	j = json.loads(rawData[idx].strip('\x01\n'))

	if 'BrowseNodes' not in j['Item']:
		return
	if 'Ancestors' in j['Item']['BrowseNodes']['BrowseNode']:
		# Single browse node: take its id if it appears at any level.
		ret = int(j['Item']['BrowseNodes']['BrowseNode']['BrowseNodeId'])
		for l in range(9, 0, -1):
			if ret in LabelDict[l]:
				labels[idx] = ret
	else:
		# Multiple browse nodes: scan each level for each node's id.
		for l in range(9, 0, -1):
			for i in j['Item']['BrowseNodes']['BrowseNode']:
				ret = int(i['BrowseNodeId'])
				if ret in LabelDict[l]:
					labels[idx] = ret

def All_Label(rawData):

	dataSize = len(rawData)
	labels = []

	for idx in range(dataSize):
		labels.append([])

		j = json.loads(rawData[idx].strip('\x01\n'))
		
		if 'BrowseNodes' not in j['Item']:
			labels[idx].append(0)
		elif 'Ancestors' in j['Item']['BrowseNodes']['BrowseNode']:

			curr = j['Item']['BrowseNodes']['BrowseNode']
			while 'BrowseNodeId' in curr:
				labels[idx].append(int(curr['BrowseNodeId']))
				if 'Ancestors' in curr and 'BrowseNode' in curr['Ancestors']:
					curr = curr['Ancestors']['BrowseNode']
				else:
					break
		else:
			for i in j['Item']['BrowseNodes']['BrowseNode']:
				curr = i
				while 'BrowseNodeId' in curr:
					labels[idx].append(int(curr['BrowseNodeId']))
					if 'Ancestors' in curr and 'BrowseNode' in curr['Ancestors']:
						curr = curr['Ancestors']['BrowseNode']
					else:
						break
		print idx #,labels[idx]
	
	with open('../Label_all.pkl', 'wb') as outfile:
		pickle.dump(labels, outfile)

def All_Label_Leaves(rawData):

	dataSize = len(rawData)
	labels = []

	for idx in range(dataSize):
		labels.append([])

		j = json.loads(rawData[idx].strip('\x01\n'))
		
		if 'BrowseNodes' not in j['Item']:
			labels[idx].append(0)
		elif 'Ancestors' in j['Item']['BrowseNodes']['BrowseNode']:

			curr = j['Item']['BrowseNodes']['BrowseNode']
			if 'BrowseNodeId' in curr:
				labels[idx].append(int(curr['BrowseNodeId']))
		else:
			for i in j['Item']['BrowseNodes']['BrowseNode']:
				curr = i
				if 'BrowseNodeId' in curr:
					labels[idx].append(int(curr['BrowseNodeId']))
		print idx #,labels[idx]
	
	with open('../Label_all_leaves.pkl', 'wb') as outfile:
		pickle.dump(labels, outfile)

def Label_Leaves(rawData):
	
	dataSize = len(rawData)
	labels = [0] * dataSize
	
	for i in range(dataSize):
		print 'Processing #',i
		while True:
			if  len(multiprocessing.active_children()) < 8:
				multiprocessing.Process(target=All_thread, args=(i,rawData,labels)).start()
				break
			else:
				sleep(0.1)

	while len(multiprocessing.active_children()) != 0:
		print 'wait for subprocess to finish'
		sleep(1)

	with open('../ALL.pkl'+name, 'wb') as outfile:
		pickle.dump(labels, outfile)

def getFirstLabel(n):
	"""Climb the Ancestors chain starting at browse node *n* and return
	the first BrowseNodeId present in the module-level validLabels table,
	or 0 when the chain ends without a match. Iterative equivalent of the
	original recursive walk."""
	node = n
	while True:
		nodeId = int(node['BrowseNodeId'])
		if nodeId in validLabels:
			return nodeId
		if 'Ancestors' not in node:
			return 0
		node = node['Ancestors']['BrowseNode']
	

			

# Script entry guard: run the extraction only when executed directly.
if __name__ == "__main__":
    main()
