#!/usr/bin/env python
# -*- coding: utf-8 -*-

import unittest
import urllib2
from urlparse import urljoin
import re
import os, time
from datetime import datetime
from pyquery import PyQuery as pq

# Directory where downloaded MP3s and transcripts are stored ("../res").
DEFAULT_PATH = os.path.join(os.path.pardir, 'res')
def grab_mp3(data, ref):
    ''' 抓取其中的MP3链接并返回文件名  '''
    mp3= pq(data)('a#mp3')
    url = mp3.attr('href')
    name = url[url.rfind('/')+1:]
    if not os.path.isdir(DEFAULT_PATH):
        os.mkdir(DEFAULT_PATH)
    if os.path.exists(os.path.join(DEFAULT_PATH,name)):
        print '%s exists'%name
        return name[:-4]
    headers = {'Referer': ref}
    req = urllib2.Request(url = url, headers=headers)
    data = urllib2.urlopen(req).read()
    with open(os.path.join(DEFAULT_PATH,name), 'wb') as fp:
        fp.write(data)
    return name[:-4]

def grab_content(data, name):
    name = '%s.txt'%name
    if os.path.exists(os.path.join(DEFAULT_PATH,name)):
        print '%s exists'%name
        return
    text = pq(data)('div#content')
    text = text.remove('div')
    text = text.html().replace('<br />', '\n')
    with open(os.path.join(DEFAULT_PATH,name), 'w') as fp:
        fp.write(text)
    return text

def grab(url, days=2):
    data = urllib2.urlopen(url).read()
    ls = pq(data)('div#list')
    for li in ls('li'):
        aa = pq(li)('a')
        text = aa.text()
        pub_date = re.findall(r'\d{4}-\d{1,2}-\d{1,2}', text)[0]
        pub_date = datetime.strptime(pub_date, "%Y-%m-%d")
        if (datetime.today()-pub_date).days>days: continue
        item_url = aa.attr('href')
        item_url = urljoin(url, item_url)
        print item_url
        # 能将urljoin('http://www.baidu.com/a/', '/b.html')
        # 变成 ： 'http://www.baidu.com/b.html'
        data = urllib2.urlopen(item_url).read()
def voa51_local(url='http://www.51voa.com/VOA_Standard_1.html', days=1, path='.'):
        name = grab_mp3(data, item_url)
        grab_content(data, name)
        time.sleep(3)

def voa51_links(url, days=1):
    '''Return the absolute URLs of all list entries on the page at `url`
    that were published within the last `days` days.'''
    links = []
    for entry in pq(url)('div#list')('li'):
        node = pq(entry)
        anchor = node('a')
        stamp = re.findall(r'\d{2,4}-\d{1,2}-\d{1,2}', node.text())[0]
        try:
            published = datetime.strptime(stamp, "%Y-%m-%d")   # 4-digit year
        except ValueError:
            published = datetime.strptime(stamp, '%y-%m-%d')   # 2-digit year
        if (datetime.today() - published).days > days:
            continue
        links.append(urljoin(url, anchor.attr('href')))
    return links


def voa51_extract(url):
    '''Given an item URL, return a dict describing it:

    url     -- the MP3 download link
    name    -- file name from the link, minus its last 4 chars ('.mp3')
    content -- transcript text with <br /> turned into newlines
    referer -- the page URL, to send as the Referer header when downloading
    '''
    doc = pq(url)
    mp3_url = doc('a#mp3').attr('href')
    base = mp3_url[mp3_url.rfind('/') + 1:-4]
    body = doc('div#content').remove('div').html().replace('<br />', '\n')
    return {
        'url': mp3_url,
        'name': base,
        'content': body,
        'referer': url,
    }

def down_mp3(url, referer, path):
    '''Download `url` to the local file `path`, sending `referer` as the
    Referer header (required by the download server).'''
    request = urllib2.Request(url=url, headers={'Referer': referer})
    payload = urllib2.urlopen(request).read()
    with open(path, 'wb') as fp:
        fp.write(payload)



def main():
    '''Grab everything published on the standard-English index within
    the last 10 days.'''
    grab('http://www.51voa.com/VOA_Standard_English/', 10)

class TestMe(unittest.TestCase):
    # NOTE(review): these tests hit the live 51voa.com site over the
    # network, so they are slow and can fail for reasons unrelated to
    # the code under test.

    @unittest.skip('ok')
    def test_voa51_extract(self):
        '''Extracting a known article should yield its slug as the name.'''
        url = 'http://www.51voa.com/VOA_Standard_English/pakistani-action-film-strikes-a-nerve-over-india-53832.html'
        res = voa51_extract(url)
        self.assertEqual('pakistani-action-film-strikes-a-nerve-over-india', res['name'])

    def test_voa51_links(self):
        '''Smoke test: collecting links from a listing page must not raise.
        (No assertion is made on the result.)'''
        url = 'http://www.51voa.com/VOA_Standard_2.html'
        voa51_links(url)
if __name__ == '__main__':
    # Run the unittest suite when executed as a script.
    unittest.main()

