# This file is part of Sonedyan.
#
# Sonedyan is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation;
# either version 3 of the License, or (at your option) any
# later version.
#
# Sonedyan is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE.  See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public
# License along with Sonedyan. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright (C) 2009-2012 Jimmy Dubuisson <jimmy.dubuisson@gmail.com>

#
# usage: python 5c_normalize-filtered-1grams-subset.py <subset-file>
#
# save the normalized time series for each 1gram found in "normalized-<subset-filename>.csv"

import sys, logging

from progressbar import ProgressBar, Percentage, Bar

# module logger (the "loggger" name is kept verbatim — renaming it would
# change which logger configuration applies)
log = logging.getLogger("loggger")

# load the yearly totals: each line of yearly-stats.txt is "<year> <count>",
# giving the total 1gram count for that year (used to normalize match counts)
record = {}
with open("yearly-stats.txt", "r") as fd1:
	for rawline in fd1:
		rawline = rawline.strip()
		if not rawline:
			# the original readline()/strip() loop stopped at the first
			# blank line; preserve that behavior
			break
		year, count = rawline.split()
		record[year] = int(count)

# the subset file name is the single expected command-line argument
subsetName = sys.argv[1]

# load the subset 1grams: one 1gram per line, mapped to its 0-based line
# index (later code only uses membership tests on this dict)
record1b = {}
with open(subsetName, "r") as fd1b:
	counter = 0
	for rawline in fd1b:
		# there is exactly one 1gram per line, so stripping is all
		# that is needed (no split)
		ngram1b = rawline.strip()
		if not ngram1b:
			# the original readline()/strip() loop stopped at the
			# first blank line; preserve that behavior
			break
		record1b[ngram1b] = counter
		counter += 1

# open the non-normalized noun stats; kept open (not `with`) because the
# main loop below reads it incrementally and closes it at the end
fd2 = open("filtered-1grams.txt", "r")

# output file: "normalized-<subset basename>.csv", one space-separated
# time series per line
fd3 = open("normalized-" + subsetName.split(".")[0] + ".csv", "w")

currentNgram = ""                # 1gram currently being accumulated
line2 = fd2.readline().strip()
record2 = {}                     # year string -> normalized frequency for currentNgram
# filtered-1grams.txt has 8'585'414 lines (hard-coded progress bar maximum)
pbar = ProgressBar(widgets=[Percentage(), Bar()], maxval=8585414).start()
linenum = 1                      # current input line number, for the progress bar
timeSeriesValues = ""            # accumulated output, written in one shot at the end

def getTimeSeriesString(timeSeriesRecord):
	"""Return the 1800-2000 time series as a space-separated string.

	timeSeriesRecord maps year strings (e.g. "1850") to numeric values;
	years absent from the record are emitted as "0". The result has no
	leading or trailing whitespace.
	"""
	# dict.get replaces the Python-2-only has_key(), and str.join replaces
	# the original quadratic string concatenation over the 201 years
	return " ".join(str(timeSeriesRecord.get(str(year), 0))
			for year in range(1800, 2001))

# read the stats file: each line is "ngram,year,matchCount,pageCount,volumeCount".
# Lines are assumed to be grouped by ngram, so a change of ngram marks the end
# of the previous 1gram's time series.
# NOTE: the original mixed tabs and spaces here (a SyntaxError on Python 3)
# and used the Python-2-only has_key(); both fixed below.
while line2:
	ngram, year, matchCount, pageCount, volumeCount = line2.split(',')
	pbar.update(linenum)
	linenum += 1
	# only keep years in [1800, 2000] for 1grams belonging to the subset
	if 1800 <= int(year) <= 2000 and ngram in record1b:
		# when we pass to the next 1gram, flush the previous 1gram's
		# normalized time series
		if currentNgram != ngram:
			if record2:
				timeSeriesValues += getTimeSeriesString(record2) + "\n"
				record2 = {}
			# change the 1gram being analyzed
			log.info("Found: " + ngram)
			print(ngram)
			currentNgram = ngram
		# normalize the yearly match count by the total 1gram count for
		# that year (the original's triple float() wrapping was redundant,
		# and its yearlyCount local was never used)
		record2[year] = float(matchCount) / record[year]
	line2 = fd2.readline().strip()

# the loop above only flushes a time series when the ngram changes, so the
# final 1gram is still pending here
if record2:
	timeSeriesValues += getTimeSeriesString(record2) + "\n"

# emit the whole accumulated output in a single write, then release both
# file handles
fd3.write(timeSeriesValues)
for handle in (fd2, fd3):
	handle.close()

pbar.finish()
sys.stdout.write('\n')
