import requests
from lxml import etree
import random
import time
from mysqlHelper import get_a_conn
import datetime

# https://blog.csdn.net/BulletTech2021/article/details/121756396

# Pool of desktop User-Agent strings; getHeaders() picks one at random per
# request to disguise the scraper as ordinary browser traffic.
user_agents = [
    'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36 OPR/26.0.1656.60',
    'Opera/8.0 (Windows NT 5.1; U; en)',
    'Mozilla/5.0 (Windows NT 5.1; U; en; rv:1.8.1) Gecko/20061208 Firefox/2.0.0 Opera 9.50',
    'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; en) Opera 9.50',
    'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:34.0) Gecko/20100101 Firefox/34.0',
    'Mozilla/5.0 (X11; U; Linux x86_64; zh-CN; rv:1.9.2.10) Gecko/20100922 Ubuntu/10.10 (maverick) Firefox/3.6.10',
    'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.57.2 (KHTML, like Gecko) Version/5.1.7 Safari/534.57.2',
    'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.71 Safari/537.36',
    'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
    'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.133 ',
    'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/30.0.1599.101 Safari/537.36',
    "Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.153 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:30.0) Gecko/20100101 Firefox/30.0",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/537.75.14",
    "Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.2; Win64; x64; Trident/6.0)",
    'Mozilla/5.0 (Windows; U; Windows NT 5.1; it; rv:1.8.1.11) Gecko/20071127 Firefox/2.0.0.11',
    'Opera/9.25 (Windows NT 5.1; U; en)',
    'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
    'Mozilla/5.0 (compatible; Konqueror/3.5; Linux) KHTML/3.5.5 (like Gecko) (Kubuntu)',
    'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.0.12) Gecko/20070731 Ubuntu/dapper-security Firefox/1.5.0.12',
    'Lynx/2.8.5rel.1 libwww-FM/2.14 SSL-MM/1.4.1 GNUTLS/1.2.9',
    "Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.7 (KHTML, like Gecko) Ubuntu/11.04 Chromium/16.0.912.77 Chrome/16.0.912.77 Safari/535.7",
    "Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:10.0) Gecko/20100101 Firefox/10.0 "
]


def getHeaders():
    """Build request headers with a User-Agent drawn at random from user_agents.

    :return: dict suitable for requests.get(headers=...)
    """
    # random.choice replaces the manual randint-based indexing of the original.
    return {'User-Agent': random.choice(user_agents)}


# Send a request to one URL, parse the response, and collect the needed fields.
def get_data(url, all_house_list):
    """Fetch one Lianjia listing page and append one tuple per listing to
    all_house_list.

    Tuple order (kept for save_data and the DB columns): (rent_mode,
    orientation, rent, division, localtion, block, size, house_type,
    release_time, tags, link).

    :param url: listing-page URL, e.g. https://xa.lianjia.com/zufang/pg1/
    :param all_house_list: accumulator list, mutated in place
    :return: all_house_list — also on parse failure (the original returned
             None when an IndexError fired inside the loop; fixed here)
    """
    main = './/div[@class="content__list--item--main"]'
    try:
        # Anti-scraping measure 1: random User-Agent per request.
        response = requests.get(url, headers=getHeaders(), stream=True)
        tree = etree.HTML(response.text)
        # One child <div> per listing inside content__list.
        li_list = tree.xpath(
            '//div[@class="content w1150"]/div[@class="content__article"]/div[@class="content__list"]/div')
        for li in li_list:
            # Title text looks like "<rent_mode>·<block> <layout> <orientation>";
            # query it once instead of three separate identical xpath calls.
            title = li.xpath(main + '/p[@class="content__list--item--title"]/a/text()')[0].strip()
            parts = title.split(' ')
            rent_mode = parts[0].split('·')[0]   # 整租 / 合租
            block = parts[0].split('·')[1]       # neighbourhood name after the dot
            orientation = parts[2]               # compass orientation
            # Monthly rent (numeric part only; unit is rendered separately).
            rent = li.xpath(main + '/span[@class="content__list--item-price"]/em/text()')[0]
            # Anchor texts of the description row: [district, sub-district].
            des_links = li.xpath(main + '/p[@class="content__list--item--des"]/a/text()')
            division = des_links[0]
            localtion = des_links[1]  # sic: this spelling is used project-wide
            # Bare text nodes of the description row; indexes 4 and 6 are the
            # area and the layout — TODO confirm against the live page markup.
            des_text = li.xpath(main + '/p[@class="content__list--item--des"]/text()')
            size = des_text[4].strip()
            house_type = des_text[6].strip()
            # How long ago the listing was published.
            release_time = li.xpath(
                main + '/p[@class="content__list--item--brand oneline"]/span[@class="content__list--item--time oneline"]/text()')[0]
            # Feature tags, concatenated with a trailing space per tag (as before).
            tags = li.xpath(main + '/p[@class="content__list--item--bottom oneline"]/i/text()')
            tagg = ''.join(tag + ' ' for tag in tags)
            # Detail-page link.
            link = li.xpath(
                main + '/p[@class="content__list--item--title"]/a[@class="twoline"]//@href')[0]
            all_house_list.append(
                (rent_mode, orientation, rent, division, localtion, block, size, house_type, release_time, tagg, link))
    except IndexError:
        # An unexpectedly-shaped listing aborts the rest of this page, but we
        # still fall through and return what was collected so far.
        pass
    return all_house_list


# Timestamp "YYYY-MM-DD HH:MM:SS" stamped on DB rows and log entries.
# Computed once at import time. The original mixed two separate clock reads
# (datetime.now() for the date, time.strftime for the time), which could
# disagree across a midnight boundary; a single strftime avoids that.
createTime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')


def before_get_data(username, city, pageSize):
    """Scrape `pageSize` pages of rental listings for `city` and save them.

    :param username: kept for interface compatibility; not used in this step
    :param city: Lianjia city sub-domain code, e.g. "xa" for Xi'an
    :param pageSize: number of listing pages to fetch, starting from page 1
    :return: number of listings collected
    """
    all_house_list = []
    # Page URLs, starting from page 1.
    # NOTE(review): the original used range(1, int(pageSize)), which fetched
    # only pageSize-1 pages; +1 makes pageSize mean "number of pages" as named.
    pages = ['https://{}.lianjia.com/zufang/pg{}/'.format(city, x)
             for x in range(1, int(pageSize) + 1)]

    for count, page in enumerate(pages, start=1):
        print(page)
        get_data(page, all_house_list)
        # Anti-scraping measure 2: random 3-10 s pause between page fetches.
        time.sleep(random.randint(3, 10))
        print('第 ' + str(count) + '页爬取完毕！')

    save_data(all_house_list)
    return len(all_house_list)

def save_data(all_house_list, filename='data/zufang.txt'):
    """Write scraped records to a text file, one comma-joined row per line.

    :param all_house_list: list of tuples as produced by get_data()
    :param filename: output path; default kept for backward compatibility
                     (the original hard-coded 'data/zufang.txt')
    """
    with open(filename, 'w', encoding='utf-8') as file:
        # writelines with a generator batches the writes instead of one
        # file.write call per row.
        file.writelines(','.join(map(str, item)) + '\n' for item in all_house_list)


"""
fetchall() 方法通常与数据库游标（cursor）对象一起使用，在执行SQL查询后，可以使用这个方法来检索查询结果集中的所有数据。
返回的结果是一个元组（tuple）列表，每个元组代表结果集中的一行数据。如果没有结果，fetchall() 将返回空列表[]。
"""
def update_mysql(city, username):
    """Copy every row of tbl_rent_house into tbl_house — updating when a row
    with the same detail link already exists, inserting otherwise — then write
    one run record into tbl_data_log.

    NOTE(review): all SQL below is built by %-string interpolation; values
    containing quotes will break the statement and the code is open to SQL
    injection. Switch to the helper's parameter binding if mysqlHelper
    supports it — TODO confirm.

    :param city: city code, resolved to a display name via tbl_city
    :param username: recorded as create_user and the log's user_name
    :return: number of rows read from tbl_rent_house (inserted + updated)
    """
    mysql = get_a_conn()

    print("--> 数据入库开始！")
    # Resolve the human-readable city name from its code.
    sql_cityname = "select city_name from tbl_city t where t.city_code = '%s'" % (city)

    # Assumes dict-style cursor rows and at least one match — IndexError if the
    # city code is unknown.
    print(sql_cityname)
    res = mysql.fetchall(sql_cityname)
    city_name = res[0].get('city_name')

    # Pull every scraped row; keys are presumed to mirror tbl_rent_house columns.
    sql_data_all = "select * from tbl_rent_house t "
    all_house_list = mysql.fetchall(sql_data_all)

    print(len(all_house_list))
    count_insert = 0
    count_update = 0
    for data in all_house_list:
        # The listing's detail link acts as the unique key: if a tbl_house row
        # with the same link exists, update it; otherwise insert a new row.
        sql_select = 'select * from tbl_house where link = "%s"' % data['link']
        res_select = mysql.fetchall(sql_select)
        if (len(res_select) > 0):
            # NOTE(review): data['location'] is read here and in the insert
            # branch, but every other part of this code base (and the tbl_house
            # column list below) spells the field 'localtion' — verify the
            # actual tbl_rent_house column name; this may raise KeyError.
            sql_update = "update tbl_house set city_name='%s',city_code='%s',rent_mode='%s',orientation='%s',rent='%s'," \
                         "division='%s', localtion='%s', block='%s', size='%s', house_type='%s'" \
                         ", release_time='%s', tags='%s', create_time='%s' , create_user='%s' where link = '%s'" \
                         % (city_name, city, data['rent_mode'], data['orientation'], data['rent'], data['division'], data['location'], data['block'], data['size'], data['house_type'],
                            data['release_time'], data['tags'], createTime, username, data['link'])
            # NOTE(review): fetchall() is used to run UPDATE/INSERT here while
            # execute() is used for the log insert below — presumably the
            # helper runs arbitrary SQL either way; confirm against mysqlHelper.
            mysql.fetchall(sql_update)
            count_update += 1
        else:
            insert_sql = 'INSERT INTO tbl_house (city_name,city_code,rent_mode,orientation,rent,division,localtion,block,size,house_type,release_time,tags,link,create_time,create_user)  ' \
                         'VALUES ("%s","%s","%s","%s","%s","%s","%s","%s","%s","%s","%s","%s","%s","%s","%s") ' \
                         % (city_name, city, data['rent_mode'], data['orientation'], data['rent'], data['division'], data['location'], data['block'], data['size'], data['house_type'],
                            data['release_time'], data['tags'], data['link'], createTime, username)
            mysql.fetchall(insert_sql)
            count_insert += 1
    print("--> 数据入库完毕,新增{}条数据，更新{}条数据 ".format(count_insert, count_update))

    print("--> 保存日志开始！")
    # Run log: who, when, how many rows; city display name goes into data_mode.
    sql = 'insert into tbl_data_log (user_name,end_time,data_num,data_url,data_mode) values ("%s","%s","%s","%s","%s")' % (
        username, createTime, len(all_house_list), "", city_name)
    mysql.execute(sql)
    print("--> 保存日志完毕！")

    return len(all_house_list)


if __name__ == '__main__':
    # Scrape 2 pages of Xi'an ("xa") listings under the "脚本录入" user and
    # write them to the local text file. Uncomment the second call to push the
    # staged rows into MySQL instead.
    result = before_get_data("脚本录入", "xa", 2)
    # result = update_mysql("xa","脚本录入")
