import requests
import random
from lxml import etree
import time
import csv
from selenium import webdriver
from PIL import Image
# API client for the Chaojiying cloud captcha-solving platform
from vcode import *
import urllib3
from requests_html import HTMLSession,UserAgent
from selenium.webdriver.common.by import By


# Every request below uses verify=False; silence the InsecureRequestWarning
# that requests/urllib3 would otherwise print for each call.
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

# Module-level placeholder; get_info() builds its own randomized User-Agent
# header and shadows this name locally.
headers = {'User-Agent': ''}

def get_info(url):
    """Scrape one listing page and append its details to house.csv.

    Follows the redirect link on the search-result page to the real detail
    page, extracts price/layout/area/etc. via XPath, and writes one CSV row
    through to_csv().  When anything in the happy path fails (typically
    because the site answered with a captcha page), the captcha is solved
    via process_captcha() and the url is retried at most once.

    Relies on the module-level globals ``delete`` (urls whose listing is
    gone) and ``flag`` (urls already retried once).
    """
    user_agent = UserAgent().random  # fresh random User-Agent per request
    headers = {"User-Agent": user_agent}
    try:
        r = requests.get(url, headers=headers, verify=False, timeout=60)
        html = etree.HTML(r.text)
        # The search-result entry only carries a redirect button pointing at
        # the actual detail page.
        detail_url = html.xpath('//a[@class="btn-redir"]/@href')[0]
        r = requests.get(detail_url, headers=headers, verify=False, timeout=60)
        html = etree.HTML(r.text)

        total_price = html.xpath('//div[@class="tab-cont-right"]/div[1]/div[1]/div[1]/i/text()')[0]
        style = html.xpath('//div[@class="tab-cont-right"]/div[@class="tr-line clearfix"][1]/div[1]/div[1]/text()')[
            0].replace('\n', '').strip()
        area = html.xpath('//div[@class="tab-cont-right"]/div[@class="tr-line clearfix"][1]/div[2]/div[1]/text()')[0]
        unit_price = \
            html.xpath('//div[@class="tab-cont-right"]/div[@class="tr-line clearfix"][1]/div[3]/div[1]/text()')[0]
        direction = \
            html.xpath('//div[@class="tab-cont-right"]/div[@class="tr-line clearfix"][2]/div[1]/div[1]/text()')[0]
        floor = html.xpath('//div[@class="tab-cont-right"]/div[@class="tr-line clearfix"][2]/div[2]/div[1]/text()')[0]
        decoration = \
            html.xpath('//div[@class="tab-cont-right"]/div[@class="tr-line clearfix"][2]/div[3]/div[1]/text()')[0]
        local = html.xpath('//div[@class="tab-cont-right"]/div[@class="tr-line"]/div[2]/div[2]/a[1]/text()')[0].replace(
            '\n', '').strip()
        # Presence of a third column in this row marks a school-district
        # listing; stored as a 1/0 flag.
        school = 1 if html.xpath('//div[@class="tab-cont-right"]/div[@class="tr-line"]/div[3]') else 0
        data = {'总价': total_price,
                '户型': style,
                '建筑面积': area,
                '单价': unit_price,
                '朝向': direction,
                '楼层': floor,
                '装修': decoration,
                '区域': local,
                '学校': school}
        content = {'建筑年代': '',
                   '有无电梯': '',
                   '产权性质': '',
                   '住宅类别': '',
                   '建筑结构': '',
                   '建筑类别': ''}
        # The spans alternate key, value; the last two entries are skipped
        # (presumably a non key/value pair in that panel — matches the
        # original (len - 2) // 2 loop).
        info = html.xpath('//div[@class="content-item fydes-item"]/div[2]//span/text()')
        for i in range((len(info) - 2) // 2):
            content[info[2 * i]] = info[2 * i + 1]
        to_csv(data, content)
    # Narrowed from a bare ``except:`` so Ctrl-C / SystemExit still stop the
    # crawl instead of being treated as a captcha page.
    except Exception:
        title = process_captcha(url)
        # If the browser lands on the listing-index title, the listing no
        # longer exists: remember the url and move on.
        if title == '兰州二手房-房天下':
            delete.append(url)
        else:
            # Guard against an endless captcha loop: each url is retried
            # only once after a solved captcha.
            if url in flag:
                return
            flag.append(url)
            get_info(url)

def to_csv(data, content):
    """Append one scraped listing as a single row to house.csv.

    ``data`` holds the per-listing fields scraped from the detail header;
    ``content`` holds the supplementary attributes from the details panel.
    """
    row = [
        data['户型'], data['建筑面积'], data['朝向'], data['楼层'], data['装修'],
        content['建筑年代'], content['有无电梯'], content['产权性质'],
        content['住宅类别'], content['建筑结构'], content['建筑类别'],
        data['区域'], data['学校'], data['总价'], data['单价'],
    ]
    with open('house.csv', 'a+', encoding='utf-8', newline='') as out:
        csv.writer(out).writerow(row)

def process_captcha(url):
    """Open *url* in Firefox, solve the on-page captcha, and return the
    resulting page title.

    The title lets the caller distinguish "listing is gone" (index-page
    title) from "captcha solved, retry the scrape".

    Fixes vs. the original: the call to Chaojiying_Client used full-width
    Chinese commas (a SyntaxError); the captcha file handle was never
    closed; the driver leaked when any step raised, and close() left the
    geckodriver process alive.
    """
    driver = webdriver.Firefox()
    try:
        driver.get(url)
        print(url)
        driver.save_screenshot('code.png')
        # Fixed crop box where the captcha image sits in the screenshot.
        left, top, right, bottom = 700, 340, 900, 405
        im = Image.open('code.png')
        im = im.crop((left, top, right, bottom))
        im.save('captcha.png')
        # TODO: fill in real Chaojiying credentials ('902223' is the soft id).
        cjy = Chaojiying_Client(你的账号, 你的密码, '902223')
        with open('captcha.png', 'rb') as fp:
            captcha_bytes = fp.read()
        # 1004 = Chaojiying captcha type (4 alphanumeric characters).
        code = cjy.PostPic(captcha_bytes, 1004).get('pic_str')
        driver.find_element(By.ID, 'code').send_keys(code)
        time.sleep(1)
        driver.find_element(By.NAME, 'submit').click()
        time.sleep(2)
        driver.get(url)
        return driver.title
    finally:
        # quit() also terminates the geckodriver process; close() would not.
        driver.quit()

if __name__ == '__main__':

    # Load the listing urls and drop duplicates.  Sort the result so the
    # resume index below refers to a deterministic order — iterating a bare
    # set() varies between runs, which made resuming at a fixed index
    # meaningless in the original.
    with open('urls.txt') as f:
        house = sorted({line.rstrip() for line in f})

    delete = []  # urls whose listing no longer exists (read by get_info)
    flag = []    # urls already retried once after a captcha (read by get_info)

    START = 2852  # resume position from the previous (interrupted) run
    for i in range(START, len(house)):
        print(f'开始爬取第{i+1}条信息')
        get_info(house[i])
    print('爬取结束！')