# -*- coding: utf-8 -*-
import os
# NOTE(review): changes the working directory at IMPORT time so relative
# paths used below (e.g. "Data/BaiduURLs/") resolve from the project root.
# This makes the module unsafe to import from arbitrary locations — confirm
# it is only ever run from its own directory.
os.chdir("..")

import urllib2
import urlparse
from scrapy.selector import Selector
import Data.InterestKeywords as InterestKeywords

def set_proxy():
    """Install a global urllib2 opener that routes HTTP/HTTPS through the proxy."""
    proxy_url = 'http://109.105.1.52:8080'
    handler = urllib2.ProxyHandler({"http": proxy_url, "https": proxy_url})
    urllib2.install_opener(urllib2.build_opener(handler))

# Browser-like request headers sent with every request so Baidu serves the
# normal desktop result page instead of rejecting the crawler.
headers = {
    'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
    'Accept-Encoding': 'none',
    'Accept-Language': 'en-US,en;q=0.8',
    'Connection': 'keep-alive',
}

def GetSubPageUrlList(_url):
    print "***************************"
    print _url
    print "***************************"
    #reg = r'http://www.baidu.com/link\?url=.[^\"]+'
    #comreg = re.compile(reg)

    req = urllib2.Request(_url, headers=headers)

    try_count = 5
    htmlpage=""
    while try_count > 0:
        try_count -= 1
        try:
            response = urllib2.urlopen(req,timeout=3)
            htmlpage = response.read()
            break
        except Exception, e:
            print "try again(", try_count, ")", e.message
            pass
    if htmlpage == "":
        print "!!! empty page content"
        return [],None
    #print "%%%%%%%%%%%%%%%%%%%%%%%%"
    #print htmlpage
    #print "%%%%%%%%%%%%%%%%%%%%%%%%"
    infoList1 = Selector(text=htmlpage).xpath(u'//div[@id="content_left"]/div/h3/a/@href').extract()
    next_page_url = Selector(text=htmlpage).xpath(u'//div[@id="page"]/a[contains(text(),"下一页")]/@href').extract()

    if len(next_page_url) > 0:
        next_page_url = urlparse.urljoin(_url, next_page_url[0])
    else:
        next_page_url = None
        print "!!! empty next page button"
    #infoList1 = re.findall(comreg, htmlpage)
    # 将列表去重之后返回
    return list(set(infoList1)),next_page_url

def FetchRealUrl(_url):
    true_url = None

    req = urllib2.Request(_url, headers=headers)
    try_count = 5
    while try_count > 0:
        try_count -= 1
        try:
            response = urllib2.urlopen(req,timeout=3)
            # 获取页面的真实的链接
            true_url=  response.geturl()
        except Exception, e:
            print "try again(",try_count,")", e.message
            pass

    return true_url

def fetch_urls_by_keyWord_from_baidu(num=100,keyword_main="",keyword_choose="",keyword_exclude=""):
    '''获取结果页面中指定页数的子链接'''
    mainList = [];

    print "searching for keyword：%s,choose one: %s, exclude keyword:%s " % (keyword_main, keyword_choose, keyword_exclude)
    # 将关键词进行url编码
    encodeKeyword = urllib2.quote(keyword_main)
    encodeKeyword_choose = urllib2.quote(keyword_choose)
    encodeKeyword_exclude = urllib2.quote(keyword_exclude)

    # url = 'http://www.baidu.com/s?wd=%s&pn=%d&tn=baiduhome_pg&ie=utf-8&usm=0' % (encodeKeyword, 10)
    url = 'http://www.baidu.com/s?q1=%s&q2=&q3=%s&q4=%s&rn=%d&lm=0&ct=0&ft=&q5=&q6=&tn=baiduadv' % (
        encodeKeyword, encodeKeyword_choose, encodeKeyword_exclude, 10)

    while len(mainList) <= num:
        subList,next_page_url = GetSubPageUrlList(url)
        if len(subList) == 0:
            break
        for u in subList:
            true_u = FetchRealUrl(u)
            if true_u == None:
                mainList.append(u)
            else:
                mainList.append(true_u)
            print len(mainList), true_u
        if next_page_url == None:
            break
        url = next_page_url
    return mainList



import os
if __name__ == '__main__':
    #set_proxy()

    #print urlparse.unquote('https://www.baidu.com/s?wd=%28%E9%87%91%E8%9E%8D%2B%E9%94%80%E5%94%AE%2B%E5%AE%A1%E8%AE%A1%2B%E5%B8%82%E5%9C%BA%2B%E5%B9%BF%E5%91%8A%2B%E5%88%B6%E9%80%A0%2B%E5%95%86%E4%B8%9A%E5%B1%95%E8%A7%88%29&pn=0&oq=%28%E9%87%91%E8%9E%8D%2B%E9%94%80%E5%94%AE%2B%E5%AE%A1%E8%AE%A1%2B%E5%B8%82%E5%9C%BA%2B%E5%B9%BF%E5%91%8A%2B%E5%88%B6%E9%80%A0%2B%E5%95%86%E4%B8%9A%E5%B1%95%E8%A7%88%29&tn=baiduadv&ie=utf-8&rsv_pq=f79395a10001864e&rsv_t=a005rbB%2FoIyYVVsvrmeY9%2Fqlr1bVn3%2BUjJGWWKHq%2By5cKPwub24x7bP0ci0l4VU')

    catagroy_keyword_list = InterestKeywords.get_catagory_keyword_list("now")

    #to map
    map_with_catagroy_keyword={}
    for i in catagroy_keyword_list:
        map_with_catagroy_keyword[i[0]]=i[1]

    print "Number of catagroy: ", len(catagroy_keyword_list)
    catagroy_keyword_list = catagroy_keyword_list[:57]


    save_dir = "Data/BaiduURLs/"
    crawl_count = 2000

    for i in catagroy_keyword_list:
        if isinstance(i[1],unicode) == False:
            continue
        searchword = i[1].encode("utf-8")
        c = i[0]
        url_file = save_dir +c

        #skip unormal or crawled catagroy
        if searchword == "":
            continue
        if os.path.exists(url_file):
            continue

        words = c.split(".")
        words_len = len(words)

        if words_len == 1:
            continue
        elif words_len == 2:
            searchword = searchword.strip().replace(" ", "+").replace("，", "+")
            crawler = fetch_urls_by_keyWord_from_baidu(crawl_count,searchword)
        elif words_len == 3:
            parent_word = map_with_catagroy_keyword[".".join([words[0],words[1]])]
            parent_word = parent_word.encode("utf-8")
            # if the parent only single word, add it to search as main keyword
            if parent_word.find(" ") != -1 or parent_word.find("，") != -1:
                parent_word = ""
            searchword = searchword.strip().replace(" ","+").replace("，","+")
            crawler = fetch_urls_by_keyWord_from_baidu(crawl_count,parent_word, searchword)
        else:
            continue

        if len(crawler) > 0:
            url_f = open(url_file,"a+")
            for u in crawler:
                url_f.write(u+"\n")
            url_f.close()

