# This file is part of Sonedyan.
#
# Sonedyan is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation;
# either version 3 of the License, or (at your option) any
# later version.
#
# Sonedyan is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE.  See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public
# License along with Sonedyan. If not, see
# <http://www.gnu.org/licenses/>.
#
# Copyright (C) 2009-2012 Jimmy Dubuisson <jimmy.dubuisson@gmail.com>

#
# usage: python filter-1grams.py <1grams-file.path>
#
# results are appended to the 'filtered-1grams.txt' file
#

import sys

# the Wordnet noun file is obtained with the following query to the wnsql database:
#
# select distinct words.wordid, words.lemma from words left join senses using(wordid) left join synsets using(synsetid) where pos='n' into outfile '/tmp/nouns.txt' fields terminated by ',' lines terminated by '\n';
#
# Load the WordNet nouns into `record`, inverting each "wordid,lemma" row
# so the lemma is the key: the filter loop below only needs fast
# membership tests on the lemma.
record = {}

# `with` guarantees the file is closed even if a line is malformed.
with open("nouns_wordnet30.txt", "r") as noun_file:
    for line in noun_file:
        line = line.strip()
        if not line:
            # Skip blank lines instead of stopping at the first one
            # (the old readline() loop silently truncated the input there).
            continue
        key, value = line.split(",")
        # from id->value to value->id
        record[value] = key

# Stream the 1-gram file named on the command line (0.csv, 1.csv, ...) and
# append every row whose ngram is a WordNet noun to 'filtered-1grams.txt'.
#
# Input rows are whitespace-separated: ngram year matchCount pageCount volumeCount
# Output rows are the same fields, comma-separated.
#
# "a" is the correct append mode — the original "aw" raises ValueError on
# Python 3; `with` closes both files even if a row fails to parse.
with open(sys.argv[1], "r") as gram_file, \
        open("filtered-1grams.txt", "a") as filtered_file:
    for row in gram_file:
        row = row.strip()
        if not row:
            # Skip blank lines instead of stopping at the first one.
            continue
        ngram, year, matchCount, pageCount, volumeCount = row.split()
        # dict.has_key() was removed in Python 3; `in` works in both 2 and 3.
        if ngram in record:
            filtered_file.write(
                ngram + ',' + year + ',' + matchCount + ',' + pageCount + ',' + volumeCount + '\n')
