import os
import string
import sys
import codecs
from types import *

def generateStopWords():
	"""Read the stop-word list from ``..\\stopwords.txt``.

	Each non-blank line that does not contain a backslash contributes its
	first whitespace-separated token.  Lines containing a backslash are
	skipped (presumably path-like noise in the file -- TODO confirm).

	Returns:
		A list of stop-word strings, in file order.
	"""
	f = open("..\\stopwords.txt", 'r')
	try:
		stopwords = []
		for line in f:
			if "\\" not in line:
				tokens = line.strip().split()
				# Guard against blank lines; the original indexed [0]
				# unconditionally and raised IndexError on them.
				if tokens:
					stopwords.append(tokens[0])
		return stopwords
	finally:
		# Always release the handle; the original leaked the open file.
		f.close()

if __name__ == '__main__':
	# Build a list of (stemmed word, review id) pairs from a tree of Amazon
	# review files, dropping stop words, then dump the pairs to index.txt
	# as "word||id" lines.
	# NOTE(review): this is Python 2 and must run under Jython -- it imports
	# a Java class (Morphology) from a CoreNLP jar below.
	# CHANGE THE ROOT FOLDER
	root = "C:\\Users\\Zinho\\Downloads\\Amazon"
	wordcount = []	# accumulates (stemmed word, review id) tuples across all files
	
	# Make the CoreNLP jar importable; Jython resolves Java packages via sys.path.
	sys.path.append("..\\lib\\stanford-corenlp-2010-11-12.jar")
	from edu.stanford.nlp.process import Morphology
	morph = Morphology()	# Java stemmer instance
	stopwords = generateStopWords()
	
	for subdir, dirs, files in os.walk(root):
		# Per-directory progress counters, printed in 10% steps below.
		newPctg = 0
		oldPctg = -1
		i = 0
		
		# Optional command-line flags select which product-category folders
		# to process; no arguments means process every folder.
		cats = sys.argv[1:]
		
		if len(cats) == 0:
			process = True
		else:
			process = False
			# One single-letter flag per category folder name.
			if ("AirConditioners" in subdir) and ('a' in cats):
				process = True
			elif ("CanisterVacuums" in subdir) and ('v' in cats):
				process = True
			elif ("CoffeeMachines" in subdir) and ('c' in cats):
				process = True
			elif ("DigitalSLRs" in subdir) and ('d' in cats):
				process = True
			elif ("Laptops" in subdir) and ('l' in cats):
				process = True
			elif ("MP3Players" in subdir) and ('m' in cats):
				process = True
			elif ("SpaceHeaters" in subdir) and ('h' in cats):
				process = True
		
		if process:
			print subdir
			
			for file in files:
				fullpath = os.path.join(subdir, file)
				
				f = open(fullpath, 'r')
				previousWord = ""
				field = "null"	# parser state: "null" / "id" / "prod" / "text"
				
				#sentencePos = 0
				for line in f:
					for sentence in line.split('.'):
						#pos = 0
						for word in sentence.split():
							#pos += len(word)
							# Field state machine: marker tokens such as
							# "ID:", "Content:", "Product:" decide which
							# field the following words belong to.
							if previousWord == "Content:":
								field = "text"
							elif word == "ID:":
								field = "null"
							elif previousWord == "ID:":
								field = "id"
							elif word == "Category:":
								field = "null"
							elif previousWord == "Product:":
								field = "prod"
							elif word == "ReviewerID:":
								field = "null"
							
							if field == "id":
								# NOTE(review): shadows the id() builtin, and
								# stays unbound if "Content:" appears before
								# "ID:" in a file -- assumed not to happen in
								# this corpus; verify against the data format.
								id = word
							elif field == "prod":
								# NOTE(review): prod is collected but never
								# written to index.txt below.
								prod = word
							elif field == "text":
								# check to see if it's a stopword
								# if it's not, stem the word							
								# Lowercase, delete punctuation (Python 2
								# str.translate(table, deletechars) form),
								# then stem via the Java Morphology object.
								word = morph.stem(word.lower().translate(string.maketrans("",""), string.punctuation))
								if word not in stopwords:
									wordcount.append((word, id))
							
							previousWord = word
						#sentencePos += pos
				f.close()
				
				# Progress report: print only at 10% multiples, once each.
				i += 1
				newPctg = int(float(i)/len(files) * 100)
				if newPctg != oldPctg and newPctg % 10 == 0:
					print newPctg,"%"
				oldPctg = newPctg
	
	# Emit the index: one "word||id" line per surviving pair.
	f = codecs.open("index.txt", 'w', 'utf-8')
	for word in wordcount:
		# The NoneType guard (via the wildcard types import at the top)
		# suggests morph.stem can return None -- NOTE(review): confirm
		# against the CoreNLP Morphology API.
		if not type(word[0]) is NoneType:
			#print word[0].encode('utf-8'),
			f.write(word[0].encode('utf-8') + "||" + word[1])
			f.write("\n")
		#print " ",
		#print word[1],
		#print " ",
		#print word[2],
		#print " ",
		#print word[3]
	f.close()