#!/usr/bin/env python
#coding=utf-8
from selenium import webdriver
import time, sys,os
from pyvirtualdisplay import Display
# Directory this script was launched from; all CSV paths below are relative to it.
path0=sys.path[0]
# Python 2-only hack: force the process default encoding to UTF-8 so the
# Chinese keyword strings survive implicit str/unicode conversions.
reload(sys)
sys.setdefaultencoding('utf-8')
print('running script in path :'+path0)
# Headless virtual X display so Firefox can run on a server with no screen.
display=Display(visible=0,size=(800,600))
display.start()
browser = webdriver.Firefox()  # start the Firefox browser
browser.implicitly_wait(30)  # implicit wait: up to 30 seconds for element lookups

def BaiduKeyWordsRank(purl, pkeywords):
    """Return the 1-based Baidu search rank of `purl` for query `pkeywords`.

    Loads the Baidu result page for `pkeywords` in the module-level
    ``browser`` and scans up to 9 result pages (10 organic results per
    page).  The rank is ``page*10 + position`` of the first result whose
    visible text contains `purl`; 0 is returned when the URL is never
    found.

    Fixes over the previous version: consistent indentation (the old body
    mixed tabs and spaces), removal of unused in-function selenium
    imports, removal of the always-true ``page < 10`` test, and a
    narrowed ``except Exception`` instead of a bare ``except:``.
    """
    search_url = 'http://www.baidu.com/s?wd=' + pkeywords  # Baidu search page
    print('searching ' + purl + ' for ' + pkeywords)
    browser.get(search_url)
    rank = 0
    for page in range(9):  # result pages 0..8
        time.sleep(3)  # give the result page time to finish rendering
        results = browser.find_elements_by_xpath(
            "//div[@id='content_left']/div[contains(@class,'result') or @tpl]")
        print('search in ' + str(len(results)) + ' result ')
        for position, result in enumerate(results):
            if purl in result.text:
                rank = page * 10 + (position + 1)
                break
        if rank:
            break
        # Not on this page: click through to the next one.  The link may
        # not be present/clickable yet, so retry a few times (best-effort;
        # if all retries fail we simply re-scan the same page next round,
        # matching the original behavior).
        retries = 5
        while retries >= 0:
            try:
                browser.find_element_by_link_text('下一页>').click()
            except Exception:
                time.sleep(3)
                retries -= 1
                print('retrying ...')
            else:
                break
    return rank

import struct
import sys
import urllib2
import httplib
import re
import xml.etree.ElementTree

class RankProvider(object):
    """Base class for services that report a page's rank/popularity
    (Google toolbar, Alexa, ...).  Subclasses must implement get_rank().
    """

    def __init__(self, host, proxy=None, timeout=30):
        """Set up the HTTP opener used by subclasses.

        host -- toolbar host address
        proxy -- optional HTTP proxy address (default: None)
        timeout -- seconds to wait for a server response (default: 30)
        """
        opener = urllib2.build_opener()
        if proxy:
            opener.add_handler(urllib2.ProxyHandler({"http": proxy}))
        self._opener = opener
        self._host = host
        self._timeout = timeout

    def get_rank(self, url):
        """Return the page rank for `url`.  Abstract: subclasses override."""
        raise NotImplementedError("You must override get_rank()")

class AlexaTrafficRank(RankProvider):
    """Obtain the Alexa Traffic Rank for a URL via Alexa's XML endpoint."""

    def __init__(self, host="xml.alexa.com", proxy=None, timeout=30):
        """host -- Alexa data host address (default: "xml.alexa.com")
        proxy -- optional HTTP proxy address (default: None)
        timeout -- seconds to wait for a server response (default: 30)
        """
        super(AlexaTrafficRank, self).__init__(host, proxy, timeout)

    def get_rank(self, url):
        """Return the Alexa traffic rank for `url` as an int.

        Returns None when the HTTP reply is not 200 OK or the XML carries
        no SD/POPULARITY element.
        """
        encoded_url = urllib2.quote(url, safe='')
        query = ("http://%s/data?cli=10&dat=nsa&ver=quirk-searchstatus&uid="
                 "20120730094100&userip=192.168.0.1&url=%s") % (self._host, encoded_url)

        response = self._opener.open(query, timeout=self._timeout)
        if response.getcode() != httplib.OK:
            return None
        root = xml.etree.ElementTree.fromstring(response.read())
        for sd in root.iterfind("SD"):
            popularity = sd.find("POPULARITY")
            if popularity is not None:
                return int(popularity.get("TEXT"))
        return None


import pandas as pd
import random

# Load the city and keyword tables that sit next to this script.
cityname_csv = pd.read_csv(os.path.join(path0, 'cityname.csv'), encoding='utf8')
ccityname = cityname_csv.loc[:, 'ccityname']
ecityname = cityname_csv.loc[:, 'ecityname']
keywords_csv = pd.read_csv(os.path.join(path0, 'keywords.csv'), encoding='utf8')
keywords = keywords_csv.loc[:, 'keywords']
url = keywords_csv.loc[:, 'url']

# Pick two distinct random city rows (the first index is re-drawn on
# collision, matching the original logic).
index_code = random.randrange(0, len(ccityname))
index_code2 = random.randrange(0, len(ccityname))
while index_code == index_code2:
    index_code = random.randrange(0, len(ccityname))

ccityname_cur = ccityname[index_code]
ecityname_cur = ecityname[index_code]
ccityname_cur2 = ccityname[index_code2]
ecityname_cur2 = ecityname[index_code2]

# Pick keyword rows: key2 must differ from key, key3 must differ from key,
# key4 must differ from key3.  Every loop below draws at least once, just
# like the original do-while-style loops.
# NOTE(review): randrange starts at 1, so keyword row 0 is never drawn --
# presumably intentional (its result column stays blank); confirm.
index_key = random.randrange(1, len(url))
index_key2 = random.randrange(1, len(url))
while index_key2 == index_key:
    index_key2 = random.randrange(1, len(url))
index_key3 = random.randrange(1, len(url))
while index_key3 == index_key:
    index_key3 = random.randrange(1, len(url))
index_key4 = random.randrange(1, len(url))
while index_key4 == index_key3:
    index_key4 = random.randrange(1, len(url))

def _rank_with_retry(key_index, ccity, ecity, attempts=1):
    """Resolve one keyword/url template against a city and look up its rank.

    Substitutes `ccity` / `ecity` into row `key_index` of the module-level
    ``keywords`` / ``url`` series, then calls BaiduKeyWordsRank, retrying
    up to `attempts` times while the rank comes back 0 (printing the same
    progress messages as the original four copy-pasted loops).

    Returns (rank, resolved_keywords, resolved_url) so the caller can
    report what was actually searched.
    """
    resolved_keywords = keywords[key_index].replace('ccityname', ccity)
    resolved_url = url[key_index].replace('ecityname', ecity)
    rank = 0
    for attempt in range(1, attempts + 1):
        if attempt > 1:
            print('retrying...')
        rank = BaiduKeyWordsRank(resolved_url, resolved_keywords)
        if rank > 0:
            break
        print('not found ...')
    return rank, resolved_keywords, resolved_url


maxrep = 1  # attempts per lookup (was duplicated across four retry loops)

# City 1: two keyword lookups.
rank1, keywords_cur1, url_cur1 = _rank_with_retry(index_key, ccityname_cur, ecityname_cur, maxrep)
rank2, keywords_cur2, url_cur2 = _rank_with_retry(index_key2, ccityname_cur, ecityname_cur, maxrep)
# City 2: two keyword lookups.
rank3, keywords_cur3, url_cur3 = _rank_with_retry(index_key3, ccityname_cur2, ecityname_cur2, maxrep)
rank4, keywords_cur4, url_cur4 = _rank_with_retry(index_key4, ccityname_cur2, ecityname_cur2, maxrep)
        

# Report the four collected ranks, then shut down the browser and display.
print('the rank of '+url_cur1+' for '+keywords_cur1+' is :')
print(rank1)
print('the rank of '+url_cur2+' for '+keywords_cur2+' is :')
print(rank2)
print('the rank of '+url_cur3+' for '+keywords_cur3+' is :')
print(rank3)
print('the rank of '+url_cur4+' for '+keywords_cur4+' is :')
print(rank4)
browser.quit()
display.stop()
# Site-wide Alexa popularity (not city specific) for the tracked site.
url = "http://www.cityhouse.cn/"
p= AlexaTrafficRank()
alexa_rank=p.get_rank(url)
print('alexa rank of '+url+"= "+str(alexa_rank))
# Append today's measurements to result1.csv (city 1) and result2.csv
# (city 2), creating each frame with the fixed column layout when the
# CSV file does not exist yet.
result_name1=os.path.join(path0,'result1.csv')
result_name2=os.path.join(path0,'result2.csv')
if os.path.exists(result_name1):
	result_csv1=pd.read_csv(result_name1)
else:
	result_csv1=pd.DataFrame(columns=['日期', '城市', 'alexa排名', '中国房价', '房产', '房价', '租金', '二手房','新楼盘'])
if os.path.exists(result_name2):
    result_csv2=pd.read_csv(result_name2)
else:
    result_csv2=pd.DataFrame(columns=['日期', '城市', 'alexa排名', '中国房价', '房产', '房价', '租金', '二手房','新楼盘'])

date_cur=time.strftime('%y/%m/%d')  # two-digit year, e.g. 24/05/01
# Row for city 1.  Columns 0-2 hold date/city/alexa rank, so keyword row
# i maps to column i+3 in iloc; unmeasured keyword columns stay ' '.
data1={'日期':date_cur, '城市':ccityname_cur,'alexa排名':str(alexa_rank), '中国房价':' ', '房产':' ', '房价':' ', '租金':' ', '二手房':' ','新楼盘':' '}
ress=pd.DataFrame(data=data1,index=[0],columns=['日期', '城市', 'alexa排名', '中国房价', '房产', '房价', '租金', '二手房','新楼盘'])
ress.iloc[0,index_key2+3]=str(rank2)
ress.iloc[0,index_key+3]=str(rank1)
result_csv1=result_csv1.append(ress)
# Row for city 2, same column layout.
data1={'日期':date_cur, '城市':ccityname_cur2,'alexa排名':str(alexa_rank), '中国房价':' ', '房产':' ', '房价':' ', '租金':' ', '二手房':' ', '新楼盘':' '}
ress=pd.DataFrame(data=data1,index=[0],columns=['日期', '城市', 'alexa排名', '中国房价', '房产', '房价', '租金', '二手房','新楼盘'])
ress.iloc[0,index_key4+3]=str(rank4)
ress.iloc[0,index_key3+3]=str(rank3)
result_csv2=result_csv2.append(ress)
result_csv1.to_csv(result_name1,encoding='utf-8',index=False)
result_csv2.to_csv(result_name2,encoding='utf-8',index=False)
  
