# -*- coding: utf-8 -*-
#easy_install mechanize
import mechanize
import cookielib
import time
from bs4 import BeautifulSoup,Tag
from urllib import myquote_plus
#from langconv import *
import random
from urllib import myquote_plus
# Browser
 
__all__ = ['InitBrowser','brbing_search']


__brbing = None

def InitBrowser(proxies=True):
    """(Re)create the shared mechanize browser used by brbing_search.

    Closes any previously created browser, then builds a fresh one with
    cookie support, the usual handler options, a desktop User-Agent and
    (optionally) a local HTTP proxy.

    :param proxies: when true, route HTTP traffic through 127.0.0.1:8087.
    """
    global __brbing
    if __brbing:
        __brbing.close()

    __brbing = mechanize.Browser()

    # Attach a cookie jar so the session survives across requests.
    __brbing.set_cookiejar(cookielib.LWPCookieJar())

    # Standard handler options.
    for enable in (__brbing.set_handle_equiv,
                   __brbing.set_handle_gzip,
                   __brbing.set_handle_redirect,
                   __brbing.set_handle_referer):
        enable(True)
    __brbing.set_handle_robots(False)

    # Follows refresh 0 but does not hang on refresh > 0.
    __brbing.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)

    # Want debugging messages?
    #__brbing.set_debug_http(True)
    #__brbing.set_debug_redirects(True)
    #__brbing.set_debug_responses(True)

    # Pretend to be a desktop Firefox (this is cheating, ok?)
    __brbing.addheaders = [('User-agent', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.1) Gecko/2008071615 Fedora/3.0.1-1.fc9 Firefox/3.0.1')]
    if proxies:
        __brbing.set_proxies({"http": "127.0.0.1:8087"})
    
# The module-level browser object __brbing is now available; it can be used
# to open pages or interact with forms.
#

InitBrowser(False)

def brbing_search(query, NumQue=25):
    """Search cn.bing.com for ``query`` and scrape the result listing.

    Submits the query through the shared mechanize browser (``__brbing``,
    created by :func:`InitBrowser`), parses each result page with
    BeautifulSoup and follows the "next page" link until at least
    ``NumQue`` results have been collected or no further page exists.

    :param query: unicode search string (encoded to UTF-8 for the form).
    :param NumQue: soft minimum number of results; the last fetched page
        is consumed whole, so slightly more may be returned.
    :return: list of ``(title, link, description)`` tuples; ``description``
        is a GBK-encoded byte string (``''`` when the result has none).
    """
    global __brbing
    __brbing.open("http://cn.bing.com/")

    # Fill the search box of the first form on the page and submit it.
    __brbing.select_form(nr=0)
    __brbing.form['q'] = query.encode('utf-8')
    __brbing.submit()
    html = __brbing.response().read()
    __brbing.response().close()

    # Debug dump of the raw page.  Best effort only: the hard-coded
    # Windows path may not exist, and that must not abort the search.
    try:
        fd = open('c:\\cut\\test_bing_info.html', 'w')
        try:
            fd.write(html)
        finally:
            fd.close()
    except (IOError, OSError):
        pass

    res_results = []
    while True:
        soup = BeautifulSoup(html)
        results_area = soup.find('div', id="results_area")
        if results_area is None:
            # Layout changed or an error page came back -- stop here
            # and return whatever was collected so far.
            break

        res_list = results_area.find('div', id='results')
        if res_list is not None:
            res_list = res_list.find('ul')
        if res_list is None:
            break

        for rrs in res_list.contents:
            # .contents mixes Tags with NavigableStrings (whitespace).
            # A NavigableString's .find() is str.find and returns an int,
            # which would crash the Tag lookups below -- skip non-Tags.
            if not isinstance(rrs, Tag):
                continue

            title_tmp = rrs.find('h3')
            if title_tmp is None:
                title_tmp = rrs.find('h2')
            if title_tmp is None:
                continue

            title_tmp = title_tmp.find('a')
            if title_tmp is None:
                continue
            title_str = title_tmp.text
            title_lnk = title_tmp.attrs['href']

            des_res_tmp = rrs.find('p')
            if des_res_tmp is None:
                des_res_tmp = rrs.find('div', {'class': 'ans'})
            des_res = ''
            if des_res_tmp is not None:
                des_res = des_res_tmp.text.encode('gbk', 'ignore').strip()
            res_results.append((title_str, title_lnk, des_res))

        if len(res_results) >= NumQue:
            break

        # Follow the pagination bar's last entry when it is the
        # "next page" link; otherwise we have reached the last page.
        pag = results_area.find('div', {'class': 'sb_pag'})
        if pag is not None:
            pag = pag.find('ul')
        if pag is None or not pag.contents:
            break
        last_item = pag.contents[-1]
        if not isinstance(last_item, Tag):
            break
        next_link = last_item.find('a')
        if next_link is None or next_link.text != u'下一页':
            break

        # Small random delay (0..0.1 s) to be polite to the server.
        time.sleep(0.1 * random.randint(0, 100) / 100.0)
        r = __brbing.open('http://cn.bing.com' + next_link.attrs['href'])
        html = r.read()

    return res_results
    
   
    
    
    


