#coding=utf-8
import urllib,time
from BeautifulSoup import BeautifulSoup
from digg import *

# Digg API application key: the registered app URL, fully percent-encoded
# (safe='' means even '/' and ':' are escaped, e.g. http%3A%2F%2F...).
akey = urllib.quote('http://jayyoung.cn/digg', safe='')
def secs2dmy(secs):
    """Format a Unix timestamp (seconds, any numeric/str type accepted
    by float()) as 'DD-MM-YYYY' in the local timezone."""
    local = time.localtime(float(secs))
    return time.strftime('%d-%m-%Y', local)
def is_popular(status):
    """Map a Digg story status to 'yes' (status == u'popular') or 'no'."""
    return 'yes' if status == u'popular' else 'no'
def get_soup(query):
    """Run a Digg search for *query* and return the result page parsed
    into a BeautifulSoup tree.

    The query is encoded with urllib.quote_plus: spaces still become
    '+' (same as the old '+'.join on split), but reserved characters
    such as '&', '=' and '#' are now percent-escaped instead of
    silently corrupting the URL.
    """
    query = urllib.quote_plus(query)
    url = ('http://digg.com/search?s=%s&submit=Search'
           '&section=all&type=all&area=all&sort=score') % query
    html = urllib.urlopen(url).read()
    return BeautifulSoup(html)

def paser_get_links(soup):
    """Return the href of the first <a> inside each 'news-body' element
    of the parsed search-results page *soup*.

    News-body nodes that contain no anchor are skipped instead of
    raising IndexError (the old code indexed findAll('a')[0] blindly).
    NOTE: the function name keeps the original 'paser' typo because
    callers elsewhere use it.
    """
    nodes = soup.findAll(lambda tag: tag.has_key('class')
                         and tag['class'] == u'news-body')
    links = []
    for node in nodes:
        anchors = node.findAll('a')
        if anchors:  # guard: a result block without a link is ignored
            links.append(anchors[0]['href'])
    return links


def get_info_by_link(link):
    # Look up one story through the Digg API (akey identifies this app)
    # and flatten the fields of interest into a tuple:
    # (title, 'yes'/'no' popular flag, digg count, comment count,
    #  story link, digg href, submit date formatted dd-mm-yyyy).
    # NOTE(review): assumes getStories(link=...) returns at least one
    # story for the given link -- an empty result raises IndexError here.
    d = Digg(akey)
    s = d.getStories(link=link)[0]
    return s.title, is_popular(s.status), s.diggs, s.comments, s.link, s.href, secs2dmy(s.submit_date)

def get_infos(query):
    """Search Digg for *query* and return a list with one info tuple
    (see get_info_by_link) per result link found on the search page."""
    result_links = paser_get_links(get_soup(query))
    return [get_info_by_link(result_link) for result_link in result_links]
    
    
