# -*- coding=utf-8 -*-
import urllib2

def get_xml_request(xml_req):
    """POST the XML request body to the Yandex XML search endpoint.

    :param xml_req: str, the XML request document to send as POST data.
    :return: the open HTTP response (file-like object from urllib2.urlopen).

    NOTE(review): the user/key credentials are hard-coded into the URL —
    consider moving them to configuration instead of source code.
    """
    endpoint = ("http://xmlsearch.yandex.ru/xmlsearch"
                "?user=robotov-alex&key=03.131096296:7674d3bd73ee222d0139c1f079d50be5")
    request = urllib2.Request(endpoint, xml_req)
    return urllib2.urlopen(request)

def _parse_xml(xml_data):
    """Parse the XML search response.

    Not implemented yet — currently a no-op placeholder.
    TODO: extract result groups/documents from the Yandex XML response.
    """
    pass

def create_request(keyword):
    """Build the Yandex XML search request body for *keyword*.

    :param keyword: str, the search query to embed in the request.
    :return: str, the XML request document with {KEY} substituted.
    """
    tpl_req = """<?xml version='1.0' encoding='utf-8'?>
<request>
    <query>{KEY}</query>
    <groupings>
	<groupby attr="d" mode="deep" groups-on-page="10"  docs-in-group="1" /> 	
    </groupings>
</request>"""
    # BUG FIX: the original computed the substituted request but returned
    # the raw template, so the {KEY} placeholder was never filled in.
    return tpl_req.replace('{KEY}', keyword)
	
def main():
    site_name = ''
    keywords = ['cats', 'dogs']

# find first keywords | changes for all keywords[:]
	final_rec = create_request(keywords[0])
    xml_doc = get_xml_request(final_rec)
    print xml_doc.read()

    print '\n', dir(xml_doc)
    print 'test: ', xml_doc.info()
# save temp result in file with 'with'
    with open('out.tmp', 'w') as f_out:
        f_out.write(xml_doc.read())
        f_out.close()
    
    _parse_xml(xml_doc)


# Run the script's entry point only when executed directly, not on import.
if __name__ == "__main__":
    main()
