#
# This file is part of Sonedyan.
# 
# Sonedyan is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation;
# either version 3 of the License, or (at your option) any
# later version.
#
# Sonedyan is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE.  See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public
# License along with Sonedyan; see the file COPYING.  If not
# see <http://www.gnu.org/licenses/>.
# 
# Copyright (C) 2009-2013 Jimmy Dubuisson <jimmy.dubuisson@gmail.com>
#

from nltk.corpus import wordnet
import itertools
import random

# get max wup similarity between two words
def get_max_similarity(word1, word2):
	"""Return the maximum Wu-Palmer similarity over all synset pairs of
	word1 and word2, or -1 when no pair has a defined path similarity
	(e.g. the words share no hypernym path, or either word is unknown
	to WordNet)."""
	synsets_a = wordnet.synsets(word1)
	synsets_b = wordnet.synsets(word2)

	# NOTE: the original code round-tripped each synset through
	# wordnet.synset(str(synset.name)); that is redundant (the synsets
	# are already in hand) and breaks on NLTK 3, where `name` is a
	# method, not an attribute.
	best = -1
	for sseta in synsets_a:
		for ssetb in synsets_b:
			# path_similarity is None for synsets with no
			# connecting path (e.g. different POS); skip those.
			if sseta.path_similarity(ssetb) is None:
				continue
			wup = sseta.wup_similarity(ssetb)
			if wup is not None and wup > best:
				best = wup
	return best

# get similarity between a set of words
def get_set_similarity(words):
	"""Return the average pairwise max similarity over `words`.

	Pairs for which get_max_similarity returns -1 (no WordNet path)
	are excluded from the average.  Returns 0 when no pair has a
	defined similarity (including len(words) < 2)."""
	total = 0.0  # float accumulator: guards against Py2 integer division
	count = 0
	# iterate word pairs directly instead of index pairs
	for w1, w2 in itertools.combinations(words, 2):
		s = get_max_similarity(w1, w2)
		if s != -1:
			total += s
			count += 1
	if count:
		return total / count
	return 0
	
# open file and get the lists of words: one clique per line, words
# separated by dots.  Reading stops at the first blank line (or EOF),
# matching the original readline/strip loop.
lists = []   # one list of words per clique line
words = []   # all words flattened across cliques (duplicates kept)

# load wordnet nouns; `with` guarantees the file is closed even on error
with open("seed32-core-dual-cliques.txt", "r") as cliques:
	for line in cliques:
		line = line.strip()
		if not line:
			break
		ws = line.split(".")
		lists.append(ws)
		words.extend(ws)  # extend in place instead of quadratic re-concatenation

alsim = 0
alrsim = 0
lwords = len(words)

for l in lists:
	ll = len(l)
	
	lsim = get_set_similarity(l)
	
	rl = random.sample(words, ll)
	lrsim = get_set_similarity(rl)
	
	alsim = alsim + lsim
	alrsim = alrsim + lrsim
	
	print l
	print lsim, "\n"
	
print "AVG set similarity: ", (alsim / len(lists))
print "AVG random set similarity: ", (alrsim / len(lists))
	
