import urllib

import lxml.html
from lxml import etree
from lxml.etree import tostring

from relevantParser import grabContent

# 1. Readability: run the readability-style extractor (grabContent) over
# every fetched page to find its "relevant" content element.
# NOTE(review): assumes `data` maps URL -> raw HTML string — defined earlier
# in this file / pipeline; confirm against the fetcher.
relevants = {}

for news in data:
    print(news)
    relevants[news] = grabContent(news, data[news])

# 1.1 Show which element (tag name + attributes) was chosen per URL.
for i in relevants:
    print(relevants[i].name, relevants[i].attrMap)

# 1.2 TODO: rank the most common and strong candidates (divs...) instead of
# hard-coding one known-good article as the template element.
relevant = relevants[u'http://news.bbc.co.uk/go/rss/-/2/hi/technology/10128476.stm']

# 1.3 Build the XPath tag name and attribute-predicate string from the
# template element, e.g. "[@class='article-text'][@name='article-text']".
name = relevant.name
attr = "".join("[@%s='%s']" % (key, value) for key, value in relevant.attrMap.items())

# 2. Re-parse every page with lxml and locate the element matching the
# template's tag name + attribute predicate built in step 1.3.
data2 = {}

for news in data:
    # NOTE: lxml.html.soupparser.fromstring(...) is a more forgiving
    # fallback if the default parser chokes on broken markup.
    html = lxml.html.fromstring(data[news])
    # e.g. ".//div[@class='article-text'][@name='article-text']"
    relevantElement = html.find(".//%s%s" % (name, attr))

    # 3. Extract plain text; skip pages where no element matched rather
    # than relying on the AttributeError from None.text_content().
    if relevantElement is None:
        print("no match for %s" % news)
        continue
    try:
        # Alternative: tostring(relevantElement) to keep the markup.
        data2[news] = relevantElement.text_content()
    except Exception as e:
        # Best-effort batch job: report and keep going.
        print(e)


# 4. Dump the extracted article text for every URL, separated by a rule
# of 100 asterisks for easy visual scanning.
for url in data2:
    print("*" * 100)
    print(url)
    print(data2[url])
