#encoding:utf8
'''
爬取安居客的上饶地区二手房信息

'''
from lxml import etree
import requests,pandas,sqlite3,traceback,threading,time

# Shared accumulator for scraped rows. Currently unused by the live code
# path — only referenced by the commented-out pandas/DataFrame snippet in
# analyse(); kept for a possible future bulk-export path.
house_list=[]
def analyse(html):
    """Parse one listing page and store every house record via insert().

    html: raw HTML text of a listing page (as returned by download()).
    For each <li class="list-item"> the XPath expressions below pull out
    the 12 columns of the `ajk` table and hand them to insert() as a
    pre-quoted SQL VALUES fragment.
    """
    try:
        tree = etree.HTML(html)
        for record in tree.xpath('//li[@class="list-item"]'):  # one <li> per listing
            title = record.xpath('.//div[@class="house-title"]/a/@title')[0]
            house_url = record.xpath('.//div[@class="house-title"]/a/@href')[0]
            detail = record.xpath('.//div[@class="details-item"]/span/text()')
            num_room = detail[0]
            size = detail[1]
            floor = detail[2]
            build_year = detail[3]
            # the @title attribute holds "<district>  <street address>",
            # separated by a double space
            address_detail = record.xpath('.//span[@class="comm-address"]/@title')[0].split(u'  ')
            section = address_detail[0]
            address = ''.join(address_detail[1:])
            tag = '|'.join(record.xpath('.//div[@class="tags-bottom"]//text()')).replace('\n', '').replace(' ', '')
            broker = record.xpath('.//span[@class="broker-name broker-text"]/text()')[0]
            total_price = ''.join(record.xpath('.//span[@class="price-det"]//text()'))
            unit_price = record.xpath('.//span[@class="unit-price"]/text()')[0]
            fields = [title, house_url, num_room, size, floor, build_year,
                      section, address, tag, broker, total_price, unit_price]
            # Double any embedded single quote so a quote inside e.g. the
            # title cannot break (or inject into) the SQL built in insert().
            sql_value = "'" + "','".join(f.replace("'", "''") for f in fields) + "'"
            insert(title, section, sql_value)
    except Exception:
        # Most likely the page layout changed or the site served a
        # captcha / anti-spider page; dump it for inspection.
        traceback.print_exc()
        print(html)

def insert(title, section, sql_value, db_path='/home/tu/PycharmProjects/ajk/ajk.db'):
    """Insert one listing row into the sqlite `ajk` table, skipping duplicates.

    title, section: used in the WHERE NOT EXISTS sub-select so that a row
        with the same (title, section) pair is never inserted twice —
        re-crawling a page is idempotent.
    sql_value: pre-quoted SQL fragment holding the 12 column values, in
        the column order of the INSERT statement below.
    db_path: sqlite database file (created on first connect if missing);
        defaults to the original hard-coded location for compatibility.
    Errors are logged and swallowed so one bad row never kills a crawl thread.
    """
    try:
        conn = sqlite3.connect(db_path)
        try:
            cursor = conn.cursor()
            # Escape embedded quotes: title/section are site-provided text,
            # a stray ' would otherwise break the statement.
            sql_insert = (
                "insert into ajk(title,house_url,num_room,size,floor,build_year,"
                "section,address,tag,broker,total_price,unit_price) "
                "select %s where not exists"
                "(select * from ajk where title='%s' and section='%s')"
                % (sql_value, title.replace("'", "''"), section.replace("'", "''"))
            )
            cursor.execute(sql_insert)
            if cursor.rowcount > 0:
                print(cursor.rowcount)  # 1 when a new row was actually inserted
            cursor.close()
            conn.commit()
        finally:
            conn.close()  # always release the connection, even on error
    except Exception:
        traceback.print_exc()

def download(page_index, str_proxy):
    """Fetch one Anjuke listing page through an authenticated HTTP proxy.

    page_index: 1-based page number of the xinzhoubc sale listing.
    str_proxy:  "host:port" string of the proxy to route through.
    Returns the page text, or None when the request failed.
    """
    url = "https://shangrao.anjuke.com/sale/xinzhoubc/p%s" % page_index
    # NOTE(review): proxy credentials are hard-coded; move them to
    # config/environment before sharing this file.
    proxy_user = "tulede"
    proxy_passwd = "tld12345"
    proxy_url = "http://" + proxy_user + ":" + proxy_passwd + "@" + str_proxy
    headers = {"User-Agent": "curl/0.7.6"}
    try:
        r = requests.get(url,
                         proxies={'http': proxy_url, 'https': proxy_url},
                         headers=headers,
                         timeout=30)  # don't let a dead proxy hang the worker thread
        return r.text
    except IOError as e:
        print(e)  # surface the failure instead of printing an empty line
        return None


def getPorxy(num):
    """Fetch `num` proxy "host:port" strings from the proxy-vendor API.

    Returns a list of proxy address strings (one per CRLF-delimited line
    of the response), or None when the request failed — callers must
    handle the None case.
    """
    url = ('http://tulede.v4.dailiyun.com/query.txt'
           '?key=NP705B9165&word=&count=%s&rand=false&detail=false' % num)
    try:
        r = requests.get(url, timeout=30)  # bound the wait on the vendor API
        # The API returns one "host:port" per line, CRLF-terminated.
        return r.text.split('\r\n')
    except IOError as e:
        print(e)  # surface the failure instead of printing an empty line
        return None

def run(page_index, str_proxy):
    """Worker entry point: fetch one listing page via the given proxy and parse it."""
    try:
        print('page%s:%s------------' % (page_index, str_proxy))
        page_html = download(page_index, str_proxy)
        analyse(page_html)
    except:
        traceback.print_exc()
def control():
    """Drive the crawl: 5 batches of up to 10 concurrent page-fetch threads.

    Each batch requests a fresh set of proxies (one per thread) and crawls
    the next block of consecutive listing pages. Waits for every worker to
    finish before returning.
    """
    batches = 5
    parallel = 10
    page_index = 0
    workers = []
    for _ in range(batches):
        proxies = getPorxy(parallel)
        if not proxies:
            # Proxy API failed for this batch (getPorxy returned None/empty);
            # skip it instead of crashing on proxies[j].
            continue
        # Cap at `parallel` even if the API returned extra lines.
        for str_proxy in proxies[:parallel]:
            page_index += 1
            t = threading.Thread(target=run, args=(page_index, str_proxy))
            t.start()
            workers.append(t)
    # Join every worker instead of the original fixed sleep(10), so the
    # caller does not proceed before all pages are processed.
    for t in workers:
        t.join()

# Script entry point: kick off the crawl when run directly.
if __name__ == '__main__':
    control()