import qp
from yos.boss import ysearch
from yos.yql import db
import nltk
import ner
import re
import urllib2

QTYPE_POINT=100
KEYWORD_COUNT_POINT=8
KEYWORD_ORDER_POINT=5
KEYWORD_VARIATION_POINT=2
MAX_WORD_IN_SENT=1000

def search_web(orig_query,ques_point):
	"""Search the web via Yahoo BOSS and return a ranked final answer.

	orig_query -- the user's question (string)
	ques_point -- expected answer entity type (e.g. 'PERSON', 'GPE',
	              'ORGANIZATION', 'DATE', ...), consumed by process_result
	Returns the list produced by get_final_answer(), or None when no
	keywords can be extracted or the top answer's score is too low.
	"""
	count_of_result=30
	entity_choice_list=[]
	
	# fetch the top results with long abstracts from Yahoo BOSS
	data=ysearch.search(orig_query,count=count_of_result,more={'abstract':'long'})
	table=db.create(data=data)
	
	keyword_list=get_keywords(orig_query)
	print "keywords=",keyword_list
	if len(keyword_list)==0:
		return
	count=0	# NOTE(review): never read below -- looks like dead code
	
	orig_query_toks=nltk.word_tokenize(orig_query)
	
	for r in table.rows:
		# strip any HTML markup from the raw result fields
		title=nltk.clean_html(r['title'])
		url=nltk.clean_html(r['url'])
		clickurl=nltk.clean_html(r['clickurl'])
		abstract=nltk.clean_html(r['abstract'])
		
		#call process_result -> it will return result_list of the form [word,current_sent_toks,curr_sent_point,total_point]
		# (process_result mutates entity_choice_list in place)
		process_result(entity_choice_list,orig_query_toks,keyword_list,ques_point,title,url,clickurl,abstract)
		
		
	#it's time to rank the entity_choice_list. call sort_list()
	ranked_result=sort_list(entity_choice_list)
	#now decide the final answer to be sent:
	for i in ranked_result:
		print i[0], i[3],'\n-----------'
	final_ans=get_final_answer(ranked_result)
	#now determine if it is a possible answer according to the points
	if final_ans!=None:
		stat=check_if_ans_probable(final_ans,keyword_list)
		if stat==0:
			return None
	#now return it
	return final_ans
	
def check_if_ans_probable(final_ans,keywords_list):
	"""Sanity-check the top answer's score against a keyword-scaled bar.

	The required minimum score is the keyword count times a per-keyword
	cutoff that shrinks as the count grows (flat 120 for eight or more
	keywords). Returns 1 when the top answer's total point clears the
	bar minus a tolerance of 2, else 0.
	"""
	n_keys=len(keywords_list)
	# per-keyword cutoff shrinks as the keyword count grows
	if n_keys<=3:
		per_key=30
	elif n_keys<=5:
		per_key=20
	elif n_keys<=7:
		per_key=15
	else:
		per_key=0
	
	# eight or more keywords: flat threshold instead of a per-keyword one
	min_required=120 if per_key==0 else n_keys*per_key
	
	top_entry=final_ans[0]
	score=top_entry[3]
	return 0 if score < min_required-2 else 1
		
def get_final_answer(ranked_result):
	"""Pick the final answer(s) from the ranked candidate list.

	Returns a list containing the top candidate, plus the runner-up
	when its score is close to the winner's (winner/runner-up ratio
	below 7/6). Returns None for an empty or None input.

	Bug fixed: the original unconditionally indexed ranked_result[1],
	raising IndexError for a single-candidate list, and divided by the
	runner-up's score, raising ZeroDivisionError when it was 0.
	"""
	if ranked_result==None or ranked_result==[]:
		return None
	TOTAL_POINT_POS=3
	DECIDING_FRACTION=float(7)/6
	final_ans_list=[ranked_result[0]]
	# include the runner-up only when it exists, has a positive score,
	# and is within the deciding ratio of the winner
	if len(ranked_result)>1:
		runner_up_point=ranked_result[1][TOTAL_POINT_POS]
		if runner_up_point>0 and float(ranked_result[0][TOTAL_POINT_POS])/runner_up_point < DECIDING_FRACTION:
			final_ans_list.append(ranked_result[1])
	
	return final_ans_list

	
def sort_list(orig_tmp_list):
	"""Return the entries sorted in descending order of total_point.

	Replaces a hand-rolled O(n^2) bubble sort with the built-in stable
	sort. Like the original (which swapped only on a strict '<'),
	entries with equal scores keep their relative order. Unlike the
	original, the input list is left unmodified and a new list is
	returned (no caller relies on the in-place mutation).
	"""
	TOTAL_POINT_POS=3
	return sorted(orig_tmp_list,key=lambda entry:entry[TOTAL_POINT_POS],reverse=True)
	
	
def get_search_query(orig_query,focus):
	"""Derive a web-search query from the original question.

	focus is of the form [,np,ques-word]; focus[2] is the question
	word / verb phrase. For list-style requests ('give', 'display',
	'list', 'show') the whole question is kept as the query; otherwise
	the question word is stripped out of it.

	Bugs fixed: the original chained the verb checks with 'and', so the
	keep-branch fired only when ALL four verbs appeared at once
	(effectively never); it also passed focus[2] to re.sub unescaped,
	so regex metacharacters in the question word corrupted the pattern.
	"""
	q=focus[2].lower()
	if any(verb in q for verb in ('give','display','list','show')):
		return orig_query
	else:
		return re.sub(re.escape(focus[2]),'',orig_query)

def get_descriptive_answer(search_q,name):
	"""Search Wikipedia and return a descriptive answer row.

	search_q -- the search query string
	name     -- list of name tokens the result title should mostly match
	Returns [['','','','',url,'',abstract]] for the first acceptable
	result, or None when nothing matches.

	Bug fixed: the original built the result table with
	db.create(data=data) BEFORE checking whether the search returned
	any data at all; the None-check is now performed first.
	"""
	query="%s site:%s" % (search_q, "en.wikipedia.org")
	data=ysearch.search(query,count=5,more={'abstract':'long'})
	if data==None:
		return
	table=db.create(data=data)
	for r in table.rows:
		url=nltk.clean_html(r['url'])
		title=nltk.clean_html(r['title'])
		# count how many of the name tokens occur in the title
		count=0
		title=title.lower()
		for i in name:
			i1=i.lower()
			if title.find(i1)!=-1:
				count+=1
		
		# require roughly half of the name tokens to appear in the title
		if count/2+1< len(name)/2:
			continue
		line=get_url(url)
		
		if line==None:
			continue
		# force the fetched line into plain ASCII before stripping markup
		tmp1=unicode(line,errors='ignore')
		tmp=tmp1.encode('ascii','ignore')
		abstract=nltk.clean_html(tmp)
		ans=[['','','', '', url,'',abstract]]
		return ans

	
def get_url(url):
	"""Fetch 'url' and return the first line that opens with '<p>'.

	Returns None when the page body/html closes, or the stream ends,
	before any paragraph line is seen.

	Bugs fixed: the original looped with 'while lin!=None' although
	file-like readline() returns '' (never None) at EOF, so a page
	with no matching line spun forever; the response object was also
	never closed (socket leak).
	"""
	ua='Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.8) Gecko/20100202 Firefox/3.5.8'
	accept='text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'
	hd={'User-Agent':ua,'Accept':accept}
	req=urllib2.Request(url,headers=hd)
	response=urllib2.urlopen(req)
	try:
		while True:
			lin=response.readline()
			if not lin:
				# EOF reached without finding a paragraph line
				return None
			if lin[:3]=='<p>':
				return lin
			if lin.find('</body>')!=-1 or lin.find('</html>')!=-1:
				return None
	finally:
		response.close()
			
			
#need to modify in LARGE!!!!!!!!!!!!!!!!!!!!	
def process_result(entity_choice_list,orig_query_toks,keyword_list,ques_point,title,url,clickurl,abstract):
	"""Extract candidate answer entities from one search result.

	Splits 'abstract' into sentences, finds entities of type
	'ques_point' in each sentence -- via the custom ner module for
	non-PERSON/GPE/ORGANIZATION types, or nltk's ne_chunk otherwise --
	scores each sentence against the question keywords, and
	appends/updates entries in 'entity_choice_list' (mutated in place).
	Entries have the form
	[word,sent_toks,curr_sent_point,total_point,url,clickurl,abstract].
	'title' is currently unused.
	"""
	abs_sents=nltk.sent_tokenize(abstract)
	#print "\n\n",abstract,"\n\n"
	for sent in abs_sents:
		sent_toks=nltk.word_tokenize(sent)
		
		#now determine what kind of entity we are looking for
		if ques_point!="PERSON" and ques_point!="GPE" and ques_point!="ORGANIZATION":
			#run my own ner
			entity_identified=ner.get_named_entity(sent,ques_point)
			if entity_identified==[]:
				continue
				
			#get_word_as_list(entity_identified)
			#if compare_word_list(word,keyword_list)==0:
			k_point=get_score(orig_query_toks,sent_toks,keyword_list)
			for word in entity_identified:
				# skip entities that merely repeat a question keyword
				if compare_word_list(word,keyword_list)==0:
					# dates need component-wise duplicate matching
					if ques_point=='DATE':
						index_position=date_entity_already_exist(entity_choice_list,word)
					else:
						index_position=entity_already_exist(entity_choice_list,word)
					if index_position== -1:
						entity_choice_list.append([word,sent_toks,k_point,k_point,url,clickurl,abstract])
					else:
						add_dup_entity_list_entry(entity_choice_list,word,sent_toks,k_point,url,clickurl,abstract,index_position)
				
		else:
			# PERSON/GPE/ORGANIZATION: use nltk's named-entity chunker
			sent_tags=nltk.pos_tag(sent_toks)
			sent_ne=nltk.ne_chunk(sent_tags)
			sent_subtrees=sent_ne.subtrees()
			
			#now check if there are any subtrees, except the first one
			#and see it it has the ANSWER_TYPE
			check=0
			word=[]
			
			for i in sent_subtrees:
				if check==0:
					# the first subtree is the whole sentence -- skip it
					check=1
				else:
					if ques_point==i.node:
						word=get_word_as_list(i.leaves())
						#now add it to entity_choice_list, along with score
						
						#check if the entity-word recognized is already present in keyword_list
						if compare_word_list(word,keyword_list)==0:
							#now see the no of keywords
							k_point=get_score(orig_query_toks,sent_toks,keyword_list)
						
							#now check if duplicate entry added, or not
							index_position=entity_already_exist(entity_choice_list,word)
							if index_position== -1:
								entity_choice_list.append([word,sent_toks,k_point,k_point,url,clickurl,abstract])
							else:
								add_dup_entity_list_entry(entity_choice_list,word,sent_toks,k_point,url,clickurl,abstract,index_position)
	
	return 


	
def add_dup_entity_list_entry(entity_choice_list,word,sent_toks,k_point,url,clickurl,abstract,index_position):
	"""Merge a re-discovered entity into its existing list entry.

	Adds the newly earned points to the entry's running total and keeps
	whichever surface form (existing or new) has more tokens. The
	stored sentence tokens remain those of the first sighting (the
	'sent_toks' argument is intentionally not used, matching the
	original contract). Mutates entity_choice_list in place.
	"""
	old_entry=entity_choice_list[index_position]
	# prefer the longer of the two surface forms
	kept_word=word if len(old_entry[0])<len(word) else old_entry[0]
	entity_choice_list[index_position]=[
		kept_word,
		old_entry[1],		# sentence tokens of the first sighting are retained
		k_point,		# score earned by the current sentence
		old_entry[3]+k_point,	# accumulated total score
		url,clickurl,abstract]
	return
	
	
def get_score(orig_query_toks,sent_toks, keyword_list):
	"""Score a sentence against the question keywords.

	The score combines three signals:
	1. the number of keywords present (KEYWORD_COUNT_POINT each),
	2. a bonus for keywords appearing in question order (up to 25%),
	3. a bonus for keyword closeness relative to the question (up to 25%).
	"""
	# keywords_present is of the form [[keyword, position-in-sentence], ...]
	keywords_present=get_keywords_present(sent_toks,keyword_list)
	
	# 1: flat points per keyword found in the sentence
	score=len(keywords_present)*KEYWORD_COUNT_POINT
	
	# 2: order bonus, proportional to the longest in-order run
	positions=[entry[1] for entry in keywords_present]
	in_order=get_max_no_keys_in_order(positions)
	order_weight=float(in_order)/len(keyword_list)/4
	score+=round(score*order_weight)
	
	# 3: closeness bonus, applied on top of the order-adjusted score
	closeness_weight=get_closeness_weight(orig_query_toks,sent_toks,keywords_present)
	score+=round(score*closeness_weight)
	
	return score
	
def get_closeness_weight(orig_query_toks,sent_toks,keywords_present):
	"""Weight (0..0.25) for how tightly the keywords are packed.

	Sums the non-negative position gaps between successive keywords in
	the sentence and in the question; the weight is their ratio divided
	by 4, capped at 0.25. Returns 0 when the sentence gaps sum to zero.
	('sent_toks' is unused; positions come from keywords_present.)
	"""
	sent_positions=[kw[1] for kw in keywords_present]
	query_positions=[orig_query_toks.index(kw[0]) for kw in keywords_present]
	
	def _positive_gap_sum(positions):
		# accumulate only the non-negative forward gaps
		total=0
		for a,b in zip(positions,positions[1:]):
			if b-a>=0:
				total+=b-a
		return total
	
	sent_spread=_positive_gap_sum(sent_positions)
	if sent_spread==0:
		return 0
	
	query_spread=_positive_gap_sum(query_positions)
	weight=float(query_spread)/sent_spread/4
	#allow only a maximum of 0.25 weightage
	return min(weight,0.25)

	
def get_keywords_present(sent_toks,keyword_list):
	"""Return [[keyword, first-match-position], ...] for every keyword
	found in the sentence (case-insensitive exact token match);
	keywords not present in the sentence are omitted.
	"""
	found=[]
	for keyword in keyword_list:
		target=keyword.lower()
		for position,token in enumerate(sent_toks):
			if token.lower()==target:
				found.append([keyword,position])
				break
	
	return found
	
	
	
def get_max_no_keys_in_order(order_list):
	"""Return the length of the longest greedily-built increasing run,
	trying every element of 'order_list' as a starting point.

	Bug fixed: the original located the current start with
	order_list.index(i), which returns the FIRST occurrence of the
	value, so duplicate values made later starting points rescan from
	the wrong position.
	"""
	best=0
	for start,value in enumerate(order_list):
		run=[value]
		# greedily extend with every later element larger than the run's tail
		for nxt in order_list[start+1:]:
			if nxt>run[-1]:
				run.append(nxt)
		if len(run)>best:
			best=len(run)
	
	return best


#need to delete	
def entity_already_exist(entity_choice_list,word):
	"""Return the index of the first entry whose stored word matches
	'word' (via compare_word_list), or -1 when no entry matches."""
	for position,entry in enumerate(entity_choice_list):
		if compare_word_list(entry[0],word)==1:
			return position
			
	return -1

def date_entity_already_exist(entity_choice_list,word):
	"""Return the index of the first entry whose stored date triple
	matches 'word' (via compare_date_list), or -1 when none matches."""
	for position,entry in enumerate(entity_choice_list):
		if compare_date_list(entry[0],word)==1:
			return position
			
	return -1

def compare_word_list(actual_word,word):
	"""Return 1 when any token of 'word' occurs (case-insensitively, as
	a substring) inside any token of 'actual_word', else 0."""
	for candidate in word:
		needle=candidate.lower()
		if any(existing.lower().find(needle)!=-1 for existing in actual_word):
			return 1
	
	return 0
	

def compare_date_list(actual_word,word):
	"""Return 1 when the two date triples agree on all three components
	(checked at indices 2, 1, 0 in that order), else 0."""
	for pos in (2,1,0):
		if word[pos]!=actual_word[pos]:
			return 0
	
	return 1
	
#to be modified to return proper keywords	
def get_keywords(orig_query):
	"""Extract the content keywords from the question.

	Normalizes the query via qp.modify_query, POS-tags it with nltk,
	and keeps tokens tagged as numbers, adjectives, or nouns; tokens
	rewritten by modify_query are replaced by their rewritten form.

	Fix: dict.has_key() (removed in Python 3, deprecated in 2.x) is
	replaced by the equivalent 'in' membership test.
	"""
	keyword_tag_list=["CD","JJ","JJR","JJS","LS","NN","NNP","NNPS","NNS"]
	(new_query,modify_list)=qp.modify_query(orig_query)
	toks=nltk.word_tokenize(new_query)
	tags=nltk.pos_tag(toks)
	keyword_list=[]
	for i in tags:
		if i[1] in keyword_tag_list:
			# substitute the normalized form when modify_query rewrote the token
			if i[0] in modify_list:
				keyword_list.append(modify_list[i[0]])
			else:
				keyword_list.append(i[0])
	
	return keyword_list

	
def get_word_as_list(alist):
	"""Return the first element of each item in 'alist' (e.g. the token
	strings of (token, tag) leaf pairs)."""
	return [item[0] for item in alist]

	
def get_image_details(word):
	"""Search Yahoo images for 'word' and return [url, height, width]
	of the first result whose title contains one of the significant
	(3+ character) words of the query, or None when nothing matches.

	Bug fixed: the original removed short words from 'words' while
	iterating over that same list, which skips the element following
	each removal; the filtered list is now built up-front. Also stopped
	shadowing the loop variable 'w' with the picture width.
	"""
	data=ysearch.search(word,vertical='images',count=10)
	if data==None:
		return 
	images=db.create(data=data)
	# keep only the significant query words (3 characters or more)
	words=[w for w in re.findall(r'\w+',word) if len(w)>=3]
	for pic in images.rows:
		title=pic['title'].lower()
		for w in words:
			if title.find(w.lower())!=-1:
				return [pic['url'],pic['height'],pic['width']]
	return None
	