#!/usr/bin/python
# -*- encoding:utf-8 -*-
"""
输入是一个页面的url 比如 http://news.baidu.com/

输出是页面上 所有的<a>元素中的链接以及链接文字 , 比如:
    [(
    "http://news.xinhuanet.com/world/2010-05/09/c_1282452.htm",
    "阿富汗与美国军队打死10名塔利班人员"
    ),]

"""
import urllib2
import re

# Matches <a ...href="URL"...>TEXT</a>.  Notes on fixes:
#  - the original class [\'|"] contained a literal '|' (alternation does not
#    work inside a character class), silently accepting '|' as a quote;
#  - the original '<a\s*href' required href to be the FIRST attribute and
#    also matched the bogus '<ahref'; '<a\s[^>]*?href' accepts attributes
#    before href and requires at least one whitespace after the tag name;
#  - raw strings avoid invalid-escape warnings for \s.
# The link text group keeps [^<] so anchors whose content starts with a
# nested tag (e.g. an <img>) are skipped, as before.
linkregex = re.compile(r'<a\s[^>]*?href=[\'"](http.*?)[\'"][^>]*?>([^<].*?)</a>')
# Extracts the charset token from a Content-Type header value such as
# "text/html; charset=gbk".
charsetregex = re.compile(r'text/html;\s*charset=(.*)')

class Crawler(object):
    """
    A crawler for fetching web page content.
    """
    def __init__(self, user_agent=None):
        # Fall back to a common browser User-Agent so that sites which
        # reject unknown clients still respond.
        self.user_agent = user_agent or 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0)'

    def get_content_and_charset(self, url, headers=None, follow_redirects=True):
        """
        Fetch *url* and return ``(body, charset)``.

        body    -- raw response body as a byte string.
        charset -- lower-cased charset from the Content-Type header, or
                   'utf-8' when the server does not report one.

        headers          -- optional dict of extra request headers; a
                            User-Agent is added unless already present.
        follow_redirects -- kept for interface compatibility; urllib2
                            always follows redirects, so it is unused.

        Raises urllib2.URLError (or a subclass) on network failure.
        """
        headers = headers or {}
        headers['user-agent'] = headers.get('user-agent', self.user_agent)
        request = urllib2.Request(url, headers=headers)
        response = urllib2.urlopen(request)
        # .get avoids a KeyError when the response has no Content-Type.
        content_type = response.headers.get('Content-Type', '')
        matches = charsetregex.findall(content_type)
        # The original indexed matches[0] unconditionally and crashed with
        # IndexError when no charset was advertised; default to utf-8.
        charset = matches[0].lower() if matches else 'utf-8'
        return response.read(), charset

def find_url(url):
    """
    Yield ``(link, text)`` pairs for every <a> element on the page at *url*.

    Both values are decoded to unicode using the charset reported by the
    server.  Example (network-dependent, so deliberately NOT a doctest --
    the original ``>>>`` examples put the expected output on ``...``
    continuation lines, which doctest would try to execute as source)::

        find_url("http://news.baidu.com/").next()
        # -> (u'http://www.baidu.com/', u'\\u7f51\\u9875')
    """
    crawler = Crawler()
    content, charset = crawler.get_content_and_charset(url)
    # gb2312/gbk pages routinely contain characters only defined in the
    # gb18030 superset; decode with the superset to avoid UnicodeDecodeError.
    # Hoisted out of the loop -- the check is loop-invariant.
    if charset in ('gb2312', 'gbk'):
        charset = 'gb18030'
    for link, text in linkregex.findall(content):
        yield link.decode(charset), text.decode(charset)

if __name__ == '__main__':
    print find_url("http://news.google.com/").next()
    #for link, title in find_url("http://news.baidu.com/"):
    #    print link,title
