import lxml.html
import urllib.request
import logging
log=logging.getLogger('bishop')

#kinonews.ru
def kinonews_id():
    """Return unique news hrefs (class 'titlefilm') from kinonews.ru/news/.

    Hrefs longer than 13 characters are filtered out, duplicates are
    skipped, and first-seen order is preserved.

    Returns:
        list[str]: relative article hrefs, suitable for kinonews_text().
    """
    # Close the response explicitly to avoid leaking the socket.
    with urllib.request.urlopen('http://www.kinonews.ru/news/') as page:
        doc = lxml.html.document_fromstring(page.read())
    listnews = []
    for link in doc.find_class('titlefilm'):
        href = link.attrib['href']
        # Original logic inverted: keep only short (<= 13 chars), unseen hrefs.
        if len(href) <= 13 and href not in listnews:
            listnews.append(href)
    return listnews

def kinonews_text(new):
    """Fetch a kinonews.ru article body and return it with a source footer.

    Args:
        new: relative article path, as returned by kinonews_id().

    Returns:
        str: article text (tabs replaced by spaces) plus a footer line
        identifying the source and article id.
    """
    with urllib.request.urlopen('http://www.kinonews.ru/' + new) as page:
        data = page.read()
        # Decode using the charset declared in the response headers.
        charset = page.info().get_content_charset()
    doc = lxml.html.fragment_fromstring(data.decode(charset), create_parent='td')
    element = None
    for node in doc.iter():
        # Article body is the element styled "text-align:justify;" (last match wins).
        if node.keys() == ['style'] and node.values() == ["text-align:justify;"]:
            element = node
    # join is linear; the original += loop was quadratic.
    text = ''.join(element.itertext())
    return text.replace('\t', ' ') + ('\nKinonews.ru: %s ?/|/?' % new)
#bash.im
def bash_id():
    """Return the list of quote ids from the bash.im front page.

    Retries the request indefinitely until at least one id is scraped;
    the number of extra attempts is logged at debug level.

    Returns:
        list[str]: quote ids (digits after '#'), e.g. ['412345', ...].
    """
    user_agent = "Mozilla/5.0 (Windows NT 6.1; rv:14.0) Gecko/20100101 Firefox/14.0.1"
    retries = 0
    while True:
        request = urllib.request.Request('http://bash.im/')
        request.add_header('User-agent', user_agent)
        # Close each response so retries do not leak sockets.
        with urllib.request.urlopen(request) as page:
            doc = lxml.html.document_fromstring(page.read())
        id_txt = ''
        for node in doc.iter():
            # Quote anchors carry exactly href+class and text like "#412345".
            # startswith avoids the IndexError the original hit on empty text.
            if node.keys() == ['href', 'class'] and node.text_content().startswith('#'):
                id_txt += node.text_content()
        if id_txt:
            if retries > 0:
                log.debug(' Bash reconnecting is %s' % retries)
            return id_txt.split('#')[1:]
        retries += 1
         
def bash_text(id_mem):
    """Fetch quote *id_mem* from bash.im and return its text plus footer.

    Args:
        id_mem: quote id string, as produced by bash_id().

    Returns:
        str: newline-separated quote lines followed by a source footer.
    """
    request = urllib.request.Request('http://bash.im/quote/' + id_mem)
    user_agent = "Mozilla/5.0 (Windows NT 6.1; rv:14.0) Gecko/20100101 Firefox/14.0.1"
    request.add_header('User-agent', user_agent)
    # Context manager closes the response (the original leaked it).
    with urllib.request.urlopen(request) as page:
        data = page.read()
        charset = page.info().get_content_charset()
    doc = lxml.html.fragment_fromstring(data.decode(charset), create_parent='body')
    element = None
    for node in doc.iter():
        # Quote body is the element with class="text" (last match wins).
        if node.keys() == ['class'] and node.values() == ['text']:
            element = node
    # Each text fragment gets a trailing newline, matching the original output.
    text = ''.join(part + '\n' for part in element.itertext())
    return text + ('Bash.im: %s ?/|/?' % id_mem)

#ixbt.com 
def ixbt_com():
    """Return news-link hrefs from the ixbt.com front page.

    Collects containers with class 'nl_link', then pulls the href from
    each descendant that has exactly href+title attributes.

    Returns:
        list[str]: relative news URLs, suitable for ixbt_text().
    """
    with urllib.request.urlopen('http://www.ixbt.com/') as page:
        data = page.read()
        charset = page.info().get_content_charset()
    doc = lxml.html.fragment_fromstring(data.decode(charset), create_parent='body')
    containers = [node for node in doc.iter()
                  if node.keys() == ['class'] and node.values() == ['nl_link']]
    hrefs = []
    for container in containers:
        for child in container.iter():
            # values()[0] is the href because keys() is exactly ['href', 'title'].
            if child.keys() == ['href', 'title']:
                hrefs.append(child.values()[0])
    return hrefs

def ixbt_text(id_news):
    """Fetch an ixbt.com news page and return its cleaned body text.

    Args:
        id_news: relative news URL, as returned by ixbt_com().

    Returns:
        str: whitespace-normalized article text plus a source footer.
    """
    with urllib.request.urlopen('http://www.ixbt.com' + id_news) as page:
        data = page.read()
        charset = page.info().get_content_charset()
    doc = lxml.html.fragment_fromstring(data.decode(charset), create_parent='body')
    element = None
    for node in doc.iter():
        # Article body is the element with class="news_body" (last match wins).
        if node.keys() == ['class'] and node.values() == ['news_body']:
            element = node
    text = element.text_content().replace('\r', '')
    text = text.replace('  ', '').replace('\n\n', '\n')
    return text.replace('\n\n\n', '') + '\nixbt.com: %s ?/|/?' % id_news

#kanobu.ru
def kanobu_ru():
    """Return unique '/blog/...' hrefs from kanobu.ru's news listing.

    Scans the first 'unit description' block for href-only elements and
    keeps blog links, de-duplicated, in first-seen order.

    Returns:
        list[str]: relative blog-post paths, suitable for kanobu_text().
    """
    with urllib.request.urlopen('http://kanobu.ru/blog/news/') as page:
        doc = lxml.html.document_fromstring(page.read())
    block = doc.find_class('unit description')
    text_id = []
    for node in block[0].iter():
        if node.keys() == ['href']:
            href = node.values()[0]
            # startswith replaces the original slice comparison [0:6] == '/blog/'.
            if href.startswith('/blog/') and href not in text_id:
                text_id.append(href)
    return text_id


def kanobu_text(new):
    """Fetch a kanobu.ru blog post and return its text, date and footer.

    The container's text is cleaned of tabs/newlines/pipes, truncated at
    the last '.', and followed by the post date and a source footer.

    Args:
        new: relative blog-post path, as returned by kanobu_ru().

    Returns:
        str: "<sentences> - <date>\\n kanobu.ru: <path> ?/|/?".
    """
    with urllib.request.urlopen('http://kanobu.ru' + new) as page:
        data = page.read()
        charset = page.info().get_content_charset()
    doc = lxml.html.fragment_fromstring(data.decode(charset), create_parent='div')
    element = None
    posted = ''  # renamed from 'time' to stop shadowing the stdlib module name
    for node in doc.iter():
        if node.keys() == ['class'] and node.values() == ['text unit']:
            element = node  # article container (last match wins)
        if node.keys() == ['class'] and node.values() == ['date']:
            posted = node.text_content()
    # iter() yields the element itself first; the original looped and broke.
    raw = next(iter(element.iter())).text_content().replace('\t', '')
    raw = raw.replace('\n', '').replace('|', '')
    # Drop everything after the final period, then rebuild sentence by sentence
    # (a trailing '. ' is intentional, matching the original output).
    sentences = raw.split('.')[:-1]
    text = ''.join(s + '. ' for s in sentences)
    return text + ' - ' + posted + '\n kanobu.ru: %s ?/|/?' % new

#GoHa.ru
def GoHa_ru():
    """Return news item ids scraped from goha.ru's feed page.

    Matches elements with exactly id+class attributes whose id value
    starts with 'new', and strips the first 5 characters of that value
    (presumably a 'news_'-style prefix — TODO confirm against the markup).

    Returns:
        list[str]: item ids, suitable for GoHa_text().
    """
    with urllib.request.urlopen('http://www.goha.ru/feed/0') as page:
        data = page.read()
        charset = page.info().get_content_charset()
    doc = lxml.html.fragment_fromstring(data.decode(charset), create_parent='div')
    ids = []
    for node in doc.iter():
        if node.keys() == ['id', 'class']:
            node_id = node.values()[0]
            # startswith replaces the original slice comparison [0:3] == 'new'.
            if node_id.startswith('new'):
                ids.append(node_id[5:])
    return ids

def GoHa_text(new):
    """Fetch a goha.ru archived news item and return its text plus date.

    Args:
        new: item id, as returned by GoHa_ru().

    Returns:
        str: article text, ' - ', posting date, and the '?/|/?' delimiter.
    """
    # Build the URL once; the original assigned it to an unused local ('cat')
    # and then rebuilt the same string inline.
    url = 'http://www.goha.ru/c/archive/item/0/' + new + '.html'
    with urllib.request.urlopen(url) as page:
        data = page.read()
        charset = page.info().get_content_charset()
    doc = lxml.html.fragment_fromstring(data.decode(charset), create_parent='div')
    element = None
    posted = ''
    for node in doc.iter():
        if node.keys() == ['class'] and node.values() == ['news_body']:
            element = node  # article container (last match wins)
        if node.keys() == ['class'] and node.values() == ['posted']:
            posted += node.text_content()
    body = ''
    for node in element.iter():
        # The actual article text is the child marked 'articleBody'.
        if node.values() == ['articleBody']:
            body = node.text_content()
            break
    body = body.replace('\r\n\r\n\r\n\r\n', ' ')
    return body.replace('\r\n', '\n') + ' - ' + posted + '?/|/?'

#Habrahabr.ru
def Habrahabr_ru():
    """Return habrahabr.ru article URLs scraped from a VK feed proxy.

    Scans the raw page text for 'http://habrahabr.ru' occurrences; each
    URL is taken as a fixed 40-character window truncated at the '"&gt'
    markup that follows it in the feed HTML.

    Returns:
        list[str]: article URLs, suitable for habratext().
    """
    feed = 'http://vkontakte-feed.appspot.com/feed/habr/wall?show_photo=0'
    with urllib.request.urlopen(feed) as page:
        # No charset header is consulted here; decode() defaults to UTF-8,
        # matching the original behavior.
        site = page.read().decode()
    urls = []
    while True:
        start = site.find("http://habrahabr.ru")
        if start == -1:
            break
        urls.append(site[start:start + 40].split('"&gt')[0])
        site = site[start + 40:]
    return urls

def habratext(url):
    """Fetch a habr feed item and return 'title\\ncontent' plus a footer.

    Falls back to a placeholder title when the scraped title is shorter
    than 5 characters (the post was presumably moved back to drafts).

    Args:
        url: article URL, as returned by Habrahabr_ru().

    Returns:
        str: title, newline, content, and a 'Habrahabr: <url> ?/|/?' footer.
    """
    with urllib.request.urlopen(url) as page:
        site = page.read().decode()
    doc = lxml.html.fragment_fromstring(site, create_parent='div')
    title = ""
    content = ""
    for node in doc.iter():
        # First element with class="title" wins.
        if node.keys() == ['class'] and node.values() == ['title']:
            title = node.text_content()
            break
    title = title.replace('\n', '').replace('\t', '')
    for node in doc.iter():
        # Last element with class="content html_format" wins.
        if node.keys() == ['class'] and node.values() == ['content html_format']:
            content = node.text_content()
    if len(title) < 5:
        # Russian placeholder: "The topic was moved to drafts by the author".
        title = 'Топик был перенесен автором в черновик'
    return title + '\n' + content + 'Habrahabr: %s ?/|/?' % url