# -*- coding: utf-8 -*-
#easy_install mechanize
import mechanize
import cookielib
import time
from bs4 import BeautifulSoup,Tag
from urllib import myquote_plus
from langconv import *
import random
from urllib import myquote_plus
# Browser
 
# Public API of this module: everything else is an implementation detail.
__all__ = ['InitBrowser','brbaidu_search']


# Module-level shared mechanize browser; created/replaced by InitBrowser().
__brbaidu = None

def InitBrowser(proxies=True, proxy_host="127.0.0.1:8087"):
    """Create (or re-create) the module-level mechanize browser ``__brbaidu``.

    Closes any previously initialised browser, then builds a fresh
    ``mechanize.Browser`` with a cookie jar, the usual protocol handlers,
    a desktop Firefox User-Agent and, optionally, an HTTP proxy.

    Parameters:
        proxies: when True, route HTTP traffic through ``proxy_host``.
        proxy_host: "host:port" of the HTTP proxy; the default keeps the
            previously hard-coded local address for backward compatibility.
    """
    global __brbaidu
    # Release resources held by a previously created browser, if any.
    if __brbaidu:
        __brbaidu.close()

    __brbaidu = mechanize.Browser()

    # Cookie jar so the Baidu session persists across requests.
    cj = cookielib.LWPCookieJar()
    __brbaidu.set_cookiejar(cj)

    # Browser options.
    __brbaidu.set_handle_equiv(True)
    __brbaidu.set_handle_gzip(True)
    __brbaidu.set_handle_redirect(True)
    __brbaidu.set_handle_referer(True)
    __brbaidu.set_handle_robots(False)

    # Follows refresh 0 but does not hang on refresh > 0.
    __brbaidu.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)

    # Debugging switches (left off by default):
    #__brbaidu.set_debug_http(True)
    #__brbaidu.set_debug_redirects(True)
    #__brbaidu.set_debug_responses(True)

    # Pretend to be a desktop Firefox so Baidu serves the normal page.
    __brbaidu.addheaders = [('User-agent', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.1) Gecko/2008071615 Fedora/3.0.1-1.fc9 Firefox/3.0.1')]
    if proxies:
        __brbaidu.set_proxies({"http": proxy_host})
    
# The __brbaidu browser object is now set up; it can be used to open pages
# or to interact with them.  (translated from the original Chinese comment)
#

# Build the shared browser once at import time, with the proxy disabled.
InitBrowser(False)

def brbaidu_search(query, NumQue=25):
    """Search Baidu for *query* and scrape organic results.

    Opens www.baidu.com with the shared mechanize browser, submits the
    search form, parses each result page with BeautifulSoup and follows
    the "next page" link until at least ``NumQue`` results have been
    collected or no further page exists.

    Parameters:
        query: unicode search phrase (encoded to UTF-8 for the form).
        NumQue: minimum number of results to gather before stopping.

    Returns:
        list of ``(title, link_url, abstract)`` tuples.
    """
    # BUG FIX: `re` is used below but was never imported at module level;
    # import locally so this edit is self-contained.
    import re

    global __brbaidu

    # Load the Baidu front page and submit the search form (input name 'wd').
    __brbaidu.open("http://www.baidu.com")
    __brbaidu.select_form(nr=0)
    __brbaidu.form['wd'] = query.encode('utf-8')
    __brbaidu.submit()
    html = __brbaidu.response().read()

    # Organic results live in divs whose class starts with "result".
    result_class = re.compile(r"\bresult.*")
    results = []
    while True:
        soup = BeautifulSoup(html)

        content_left = soup.find('div', id='content_left')
        if content_left is None:
            # Unexpected page layout (captcha, error page, ...): stop here
            # instead of crashing on a None lookup.
            break
        result_cnt = content_left.findAll('div', {'class': result_class})

        for rcnt in result_cnt:
            title_cnt = rcnt.find('h3').find('a')
            title_str = title_cnt.text
            title_lnk = title_cnt.attrs['href']

            # Prefer the dedicated abstract div; fall back to the <p> texts.
            # BUG FIX: the original passed the *set* {'class','c-abstract'};
            # a dict is required to match on the class attribute.
            abstract = ''
            c_abstract = rcnt.find('div', {'class': 'c-abstract'})
            if c_abstract is not None:
                abstract = c_abstract.text
            else:
                for ps in rcnt.findAll('p'):
                    abstract = abstract + '\n' + ps.text
            results.append((title_str, title_lnk, abstract))

        if len(results) >= NumQue:
            break

        # Locate the pagination block (div#page inside div#container).
        container = soup.find('div', id='container')
        if container is None:
            break
        nav_page = None
        for nv in container:
            # Children include NavigableStrings that have no .attrs; skip them.
            if getattr(nv, 'attrs', None) and nv.attrs.get('id') == 'page':
                nav_page = nv
                break
        if nav_page is None:
            break

        # The second-to-last pagination child is the "next page" anchor.
        if nav_page.contents[-2].text == u'下一页>':
            next_page_lnk = nav_page.contents[-2]
            # Small random delay so we do not hammer Baidu.
            time.sleep(0.1 * random.randint(0, 100) / 100.0)
            r = __brbaidu.open('http://www.baidu.com' + next_page_lnk.attrs['href'])
            html = r.read()
        else:
            break

    return results
            
        
    
    
    


