## @alex 30 August 2011 17:57
## Software for gathering site statistics from search engines
##

import xml.etree.ElementTree as etree
import urllib2
import os
import glob

sites_path = 'sites'  # directory holding one keyword file per site

## -----read-params-from-file---------------------------------------------
def get_keywords(site_name):
	"""Read *site_name* and return its lines as a list of keywords.

	Each line is stripped of surrounding whitespace; blank lines become
	empty strings (original behavior preserved).
	"""
	with open(site_name) as key_file:
		return [line.strip() for line in key_file]

def get_sites_keywords():
	"""Collect the keywords of every site file under ``sites_path``.

	Returns a dict mapping each file name (the site identifier) to its
	list of keywords, or ``False`` when the sites directory does not
	exist (kept for backward compatibility with existing callers).
	"""
	if not os.path.exists(sites_path):
		return False
	res = {}
	# Build explicit paths instead of os.chdir(): mutating the process-wide
	# current directory is fragile — an exception inside the loop would have
	# left the CWD pointing at the sites directory for the rest of the run.
	for path in glob.glob(os.path.join(sites_path, '*.*')):
		res[os.path.basename(path)] = get_keywords(path)
	return res
## -----end-read-params---------------------------------------------------
## -----parse-xml-file----------------------------------------------------
def get_query(xml_file):
    """Return the text of the <request>/<query> node parsed from *xml_file*.

    *xml_file* is an open file-like object containing the XML reply.
    """
    # TODO: handle <error> responses (attribute 'code' and message text)
    return etree.parse(xml_file).findtext('request/query')
    
def get_domain_list(xml_file):
    """Return every <domain> value found in the grouped search results."""
    tree = etree.parse(xml_file)
    group_path = 'response/results/grouping/group/doc'
    return [doc.findtext('domain') for doc in tree.findall(group_path)]

def test_xml():
    """Ad-hoc manual test: parse a locally saved response file 'result.xml'.

    NOTE(review): etree.parse() consumes the file object, which is why the
    handle must be rewound with seek(0) before parsing it a second time.
    """
    file_name = 'result.xml';
    with open(file_name, 'r') as f_xml:
##        print 'All finding domain: ', get_domain_list(f_xml)
        f_xml.seek(0) # rewind: a prior parse would have consumed the stream
        print 'Request: ', get_query(f_xml)

## -------------------------------------------------------------------
## -------------------------------------------------------------------
def create_request(keyword):
	"""Build the XML-search request body for *keyword*.

	BUG FIX: the keyword is now XML-escaped, so characters such as '&'
	and '<' can no longer produce a malformed request document (or inject
	extra markup into it). Plain keywords are unaffected.
	"""
	from xml.sax.saxutils import escape  # local import keeps module deps unchanged
	tpl_req = """<?xml version='1.0' encoding='utf-8'?>
<request>
    <query>{KEY}</query>
    <groupings>
	<groupby attr="d" mode="deep" groups-on-page="10"  docs-in-group="1" /> 	
    </groupings>
</request>"""
	final_rec = tpl_req.replace('{KEY}', escape(keyword))
	return final_rec

def get_xml_request(xml_req):
	"""POST the XML request body *xml_req* to the Yandex XML-search endpoint.

	Returns the open HTTP response (a file-like object); the caller is
	responsible for reading it.
	SECURITY NOTE(review): the user name and API key are hard-coded in the
	URL below — they belong in configuration, not in source control.
	"""
	q_address = "http://xmlsearch.yandex.ru/xmlsearch?user=robotov-alex&key=03.131096296:7674d3bd73ee222d0139c1f079d50be5"
	connect = urllib2.Request(q_address, xml_req)  # passing data makes this a POST
	res = urllib2.urlopen(connect)
	return res

def get_position(xml_obj, domain):
    """Return the 1-based position of *domain* in the parsed result list,
    or 0 when the domain does not appear at all.
    """
    f_domains = get_domain_list(xml_obj)
    # The original looped over the list and called list.index() inside the
    # loop (quadratic and redundant); one membership test plus one index()
    # call yields the same first-match semantics.
    if domain in f_domains:
        return f_domains.index(domain) + 1
    return 0
## -------------------------------------------------------------------
def sites_menu():
    """Print a numbered menu of site files and prompt the user to pick one.

    Returns the chosen file name (a key of get_sites_keywords()).
    NOTE(review): no input validation — a non-numeric answer raises
    ValueError, an out-of-range number raises IndexError (or silently
    picks from the end for negatives), and if get_sites_keywords()
    returned False (missing 'sites' directory) the .keys() call crashes.
    """
    print 'sites for analising: \n'
    sites = get_sites_keywords().keys()
    for s in sites:
        print sites.index(s) + 1, '.  ', s
    print '\n'
    work_site = int(raw_input('write number of site you choosing: '))
    return sites[work_site - 1]
## -------------------------------------------------------------------
# TODO: make @closure
def write_in_file(file_name, xml_obj):
    """Dump the remaining contents of the file-like *xml_obj* into *file_name*."""
    payload = xml_obj.read()
    with open(file_name, 'w') as out_file:
        out_file.write(payload)
## -------------------------------------------------------------------
def run_queue(site_name):
    keywords = get_keywords(sites_path + '/' + site_name)
    for key in keywords:
        xml_request = create_request(key)
        xml_obj = get_xml_request(xml_request)
        write_in_file('with_menu.tmp', xml_obj)
        # this print's for comparison key before and after request
        print key
        print get_query(xml_obj)
        print get_domain_list(xml_obj)
        print 'Position(', key ,'): ', get_position(xml_obj, site_name)
        print '----------- end for site ', site_name, ' -------------'
## -------------------------------------------------------------------

def main():
    """Console entry point.

    First dumps debug information parsed from a locally saved 'result.xml'
    (instead of performing a live HTTP request), then shows the interactive
    site menu and runs the full query loop for the chosen site.
    """
    print '---== Seo statistics ==---'
    print 'console version\n'
	
    sites = get_sites_keywords()
    print 'sites: ', sites
    keywords = get_keywords(sites_path + '/' + get_sites_keywords().keys()[0])
    xml_request = create_request(keywords[0])
    #xml_obj = get_xml_request(xml_request)
    
    # data for debugging: a saved response stands in for the live HTTP reply
    xml_obj = open('result.xml')
    # data for debugging ---------

	# write xml answer in temp file
    #with open('out.tmp', 'w') as xml_doc:
    #    xml_doc.write(xml_obj.read())
   
    print get_query(xml_obj)
    xml_obj.seek(0)  # rewind: etree.parse() consumed the file object
    print get_domain_list(xml_obj)
    xml_obj.seek(0)  # rewind again before the next parse

    # data for debugging ---------
    print 'Position(youtube): ', get_position(xml_obj, 'www.youtube.com') 
    # data for debugging ---------
    # menu testing
    choosing_site = sites_menu()
    run_queue(choosing_site)

if __name__ == "__main__":
    main()
