# -*- coding: utf-8 -*-
import urllib,socket,os,codecs,random,time
from myfuncs import *

# Names of residential communities (小区) in Qingdao to search on Lianjia.
# Each name is URL-quoted and appended to the ershoufang search URL below,
# so every entry must match the community name exactly as Lianjia lists it.
xqs = ['红状元小区','台湾大街','长江盛阁','天齐雅诚名筑','锦都家园','中冶爱彼岸',
       '蓝图二期','爱琴海','怡海苑','凤海苑','恩马双城汇','昆泉星港','怡情海岸',
       '皇冠国际','皇冠国际一期','永联佳园','海尔地产·山海湾','千城凤梧金沙',
       '富源公馆','巧克力公寓金色海湾','亚星蓝图','金泽儒家','海信阳光里',
       '华裕黄金海岸','海信凤凰金岸','西海景苑','青岛印象金沙滩','海韵嘉园',
       '凤凰城','亚德里亚海湾','石雀小筑','海棠里小区','御龙湾朗寓','朱雀台',
       '滨海苑','海富海','官厅前海园','金色环海','提香海岸','康大观邸','福瀛东郡',
       '海岸风情','迪维花园','隆博洋房','金星滨海花园','环海君和花园','信发第五城市',
       '新华锦龙邸','来仪凤仪帝景','锦源尚都','天齐文海国际','恒大金沙滩','悦海花园',
       '九洲佳园','鲁泽花园','衡山舒苑','华阳公寓','万德丰观景苑','福瀛天麓湖','金沙花园']


#第一步，检索所有房源url并保存
price_low = 75  #最低房价
price_top = 121 #最高房价

open("urls_new.txt",'a')#重新创建输出文件
urls_save = []
for line in open('urls_save.txt','r'):
    urls_save.append(line.strip())
    
count = 1
loc = 0
for i in range(loc,len(xqs)):
    print "[%d/%d]"%(count+loc,len(xqs))
    f_url_new = open('urls_new.txt','a')
    xq = xqs[i]
    print i+1,xq
    urllib.urlretrieve('http://qd.lianjia.com/ershoufang/co41l2l3bp'+\
                       str(price_low)+'ep'+str(price_top)+'rs'+\
                       urllib.quote(xq)+'/',"index.html")
    urls = getUrls("index.html")
    print "urls:"+str(len(urls))
    if(urls != None):
        for url in urls:
            if url not in urls_save:                
                print "new:"+url
                f_url_new.write(url+"\n")
    else:
        print str(i)+u":失败"
        break
    #f_url_save.close()
    f_url_new.close()
    t = random.uniform(5,10)
    print 'sleep'+str(t)
    time.sleep(t)
    count += 1
    

###第二步，对旧信息进行复查 (Step 2, disabled: re-check previously saved listings for price changes / removal / sale)
##url_save = open('urls_save.txt','r').readlines()
##
##info_dict = {}
##for ss in codecs.open('info_save.txt','r','utf-8').readlines():
##    s = ss.strip().split('\t')
##    info_dict[s[0].strip()] = [s[-2],ss.strip()]
##
##count = 1
##loc = 0
##for url in url_save[loc:]:
##    if len(url) <10:
##        continue
##    print "[%d/%d]"%(count+loc,len(url_save))
##    f_url_sold = open('urls_sold.txt','a')
##    f_url_save = open('urls_save.txt','a')
##    f_info_sold = open('info_sold.txt','a')
##    f_info_save = open('info_save.txt','a')
##    
##    ID = url[33:45]
##    if ID not in info_dict.keys():        
##        count += 1
##        continue
##        
##    price_old = info_dict[url[33:45]][0]
##    info = getInfoChged(url,price_old)#返回[信号，新价格]
##    print info_dict[ID][1]
##    if info[0] == 0:#下架
##        print 'Removed\t'+price_old+' ----> '+info[1]
##        f_url_sold.write(url)
##        f_info_sold.write(info_dict[ID][1]+"\t"+info[1]+"\tRemoved\n")
##    elif info[0] == 3:#成交
##        print 'sold'+price_old+' ----> '+info[1]
##        f_url_sold.write(url)
##        f_info_sold.write(info_dict[ID][1]+"\t"+info[1]+"\tSold\n")
##        
##    elif info[0] == 1:#涨价
##        print 'PriceUp\t'+price_old+' ----> '+info[1]
##        f_url_save.write(url)
##        f_info_save.write(info_dict[ID][1]+"\t"+info[1]+"\tPriceUp\n")
##    elif info[0] == 2:#不变
##        print 'Unchanged'
##        f_url_save.write(url)
##        f_info_save.write(info_dict[ID][1]+"\t"+info[1]+"\tUnchanged\n")   
##    time.sleep(random.uniform(10,15))
##    f_url_sold.close()
##    f_url_save.close()
##    f_info_sold.close()
##    f_info_save.close()
##    count += 1

    
###第三步，检索每个新房源的具体信息 (Step 3, disabled: fetch detailed info for each newly found listing)
##keys = ["ID","SellTime","name","buildTime","Tprice",\
##        "area","area1","price1","fangben","huxing","dianti",\
##        "chaoxiang","chanquan","zhuangxiu","louceng"]
##
##
##open("info_new.txt",'w')#信息检索开始前，清空'info_new.txt'信息
##urls = open('urls_new.txt','r').readlines()
##print len(urls)
##loc = 0
##count = 1
##for url in urls[loc:]:
##    f_url_save = open('urls_save.txt','a')
##    f_w_new = open("info_new.txt",'a')
##    f_w_save = open("info_save.txt",'a')
##    print url
##    print "[%d/%d]"%(count+loc,len(urls))
##    info = getInfo(url)
##    for key in keys:
##        f_w_new.write(info[key]+"\t")
##        f_w_save.write(info[key]+"\t")
##        
##    f_w_new.write(info["Tprice"]+"\tNew")
##    f_w_save.write(info["Tprice"]+"\tNew")
##    
##    f_w_new.write("\n")    
##    f_w_save.write("\n")
##    
##    f_url_save.write(url)
##    count +=1    
##    t = random.uniform(3,10)       
##    print "sleep:%.1f"%t
##    time.sleep(t)
##    f_w_new.close()
##    f_w_save.close()
##    f_url_save.close()
##
##open('urls_new.txt','w') #信息检索结束后，清空'urls_new.txt'信息

