import requests
import csv
import json
import re
import os
import sys
from pymysql import *
from utils.query import querys

# Initialisation: create the CSV output file with its header row, once.
def init():
    """Create houseInfoData.csv with a header row if it does not exist yet."""
    if os.path.exists('houseInfoData.csv'):
        return
    header = [
        'title', 'cover', 'city', 'region', 'address', 'rooms_desc',
        'area_range', 'all_ready', 'price', 'houseDecoration', 'company',
        'houseType', 'on_time', 'open_date', 'tags', 'totalPrice_range',
        'sale_status', 'detail_url',
    ]
    # newline='' lets the csv module manage line endings itself
    with open('houseInfoData.csv', 'w', encoding='utf-8', newline='') as csv_f:
        csv.writer(csv_f).writerow(header)

# Append one data row to the CSV output file.
def writeRow(row):
    """Append *row* (a list of field values) to houseInfoData.csv."""
    with open('houseInfoData.csv', 'a', encoding='utf-8', newline='') as out:
        csv.writer(out).writerow(row)

# Fetch one listing page and return its JSON house list.
def getData(url):
    """GET *url* and return ``data.list`` from the JSON payload.

    Returns None when the server answers with a non-200 status code.
    NOTE(review): the hard-coded Cookie is session-bound and will expire;
    refresh it when the site starts rejecting requests.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
        'Referer': 'https://sz.fang.lianjia.com/loupan/pg2/',
        'Cookie': 'lianjia_uuid=0194dddd-e71e-4000-8126-f2f37188010a; _smt_uid=657b34ab.3f6b67c4; _jzqc=1; _jzqckmp=1; sajssdk_2015_cross_new_user=1; sensorsdata2015jssdkcross=%7B%22distinct_id%22%3A%2218c6945be73e28-0b9c1b38c560d6-26001951-1350728-18c6945be74106f%22%2C%22%24device_id%22%3A%2218c6945be73e28-0b9c1b38c560d6-26001951-1350728-18c6945be74106f%22%2C%22props%22%3A%7B%22%24latest_traffic_source_type%22%3A%22%E8%87%AA%E7%84%B6%E6%90%9C%E7%B4%A2%E6%B5%81%E9%87%8F%22%2C%22%24latest_referrer%22%3A%22https%3A%2F%2Fwww.baidu.com%2Flink%22%2C%22%24latest_referrer_host%22%3A%22www.baidu.com%22%2C%22%24latest_search_keyword%22%3A%22%E6%9C%AA%E5%8F%96%E5%88%B0%E5%80%BC%22%7D%7D; _jzqc=1; _ga=GA1.2.28442054.1702573230; _gid=GA1.2.669413055.1702573230; _jzqx=1.1702646800.1702646800.1.jzqsr=sz%2Efang%2Elianjia%2Ecom|jzqct=/.-; _qzjc=1; _jzqa=1.272265084937531600.1702573227.1702573227.1702573227.1; lianjia_ssid=632ac11b-d89d-4cc4-a6ba-1bb5896b2e04; _jzqa=1.272265084937531600.1702573227.1702573227.1702647964.2; _jzqy=1.1702573227.1702647964.1.jzqsr=baidu.-; _ga_DB196GBT1C=GS1.2.1702647967.2.0.1702647967.0.0.0; login_ucid=2000000388917677; lianjia_token=2.0015a22945460f2d04040f0074c2ae2a40; lianjia_token_secure=2.0015a22945460f2d04040f0074c2ae2a40; security_ticket=QngWw1EkDHlUAHoBleePvn49F0pewcNK/WGfG4xHXAnTJVkBEKXcaQoToCfMTdqJ7w10afJm2qA5byB6i/zdIeg3kKX/KehSrpIg11TRk0q6PqzfEvoSEAFz5wKZITa8mPhMKIXvJkapZdBgQGxW1eIygaDqf65am9FjX0Eg6Ec=; digData=%7B%22key%22%3A%22loupan_index%22%7D; select_city=440300; lj_newh_session=eyJpdiI6IlA5dVNTN2s0YTF5K05zT05xNnljVlE9PSIsInZhbHVlIjoiZ25zQjdhUUYzU1VGY1wvcE5wcUE1YjhBSjM5WUMwNFBlRloybHU1MjJmXC9XYmFEZlp5TFZXeDlwSFZnQXpaTkhKSHNuNXdcLzhNYVNWUUMzcGZ2ZHgyanc9PSIsIm1hYyI6ImYzZGI2MDliYjM1ZDBlMDRkMzZiNWFjZmE2NWIwZWZlYzBhYThiOTlhMDNmYmYwNmEyNTE2ODM4ZjdhYzAyNDIifQ%3D%3D; _qzja=1.1265977499.1702646799891.1702646799891.1702646799891.1702649265062.1702650714567.0.0.0.5.1; _qzjb=1.1702646799891.5.0.0.0; _qzjto=5.1.0; _jzqb=1.3.10.1702647964.1; srcid=eyJ0IjoiXCJ7XFxcImRhdGFcXFwiOlxcXCJlMWVjM2JjZDZlZjY2MjY0YzFjYzVlNDE2Zjc3YWExNWFjZWNlN2FmODQyZjM2MGRmODM3Zjg4N2U1ZjBlMDg1ZTJkNzg3ZGIwNWRlM2UwNWM5ZjdlNDIyOTIwMTg1ZThjZjlmMzQwYjZlYmFmZTc3ZWI0YTVmNGIyYjhmMjZmOWQ4N2IxMzgzZmY0ODMwYjJlMzNmMjgwZjIwM2ZhZWRmODVhNmRhNWY1MTA2YWNlZDcwMjk1NWIzYzQzYjViMjk0ZGZhYjRhZGVkNGJjMTBlZDgzYjk5ODgwZGQ1MDRiMjNjMzQ5MWZjZWRhNTJiZmY2NGYyODQzNTIyNzdlMTVjXFxcIixcXFwia2V5X2lkXFxcIjpcXFwiMVxcXCIsXFxcInNpZ25cXFwiOlxcXCI4MGI0MmNhZFxcXCJ9XCIiLCJyIjoiaHR0cHM6Ly9zei5mYW5nLmxpYW5qaWEuY29tL2xvdXBhbi9wZzEvIiwib3MiOiJ3ZWIiLCJ2IjoiMC4xIn0=; _ga_QV5NEPS9ZK=GS1.2.1702646803.1.1.1702650716.0.0.0'
    }
    # BUG FIX: requests.get()'s second positional parameter is ``params``,
    # not ``headers`` — the original call never sent these headers at all.
    response = requests.get(url, headers=headers)
    if response.status_code == 200:
        responseJson = json.loads(response.content.decode("utf-8"))
        return responseJson['data']['list']
    return None

# Parse the house entries of one listing page and append each as a CSV row.
def parseData(houseDetailList, city, url):
    """Extract every house's fields from *houseDetailList* and write one
    CSV row per house via writeRow().

    houseDetailList -- the ``data.list`` array returned by getData()
    city            -- city name stored verbatim with every row
    url             -- the page url, used to rebuild absolute detail links
    """
    # The listing host is identical for every entry on the page, so run the
    # regex once instead of once per house.  Raw string: '\d' is an invalid
    # escape sequence in a plain literal (DeprecationWarning on CPython 3.12+).
    host = re.search(r'//(.*)/loupan/pg\d/\?_t=1', url).group(1)
    for house_info in houseDetailList:
        title = house_info['title']
        cover = house_info['cover_pic']
        region = house_info['district']
        address = house_info['address']
        # The DB column rooms_desc is varchar, so list values are stored as
        # JSON text via json.dumps() (same for the other list fields below).
        rooms_desc = json.dumps(house_info['frame_rooms_desc'].replace('居', '').split('/'))
        area_range = json.dumps(house_info['resblock_frame_area_range'].replace('㎡', '').split('-'))
        all_ready = house_info['permit_all_ready']
        price = house_info['average_price']
        houseDecoration = house_info['decoration']
        # Robustness: developer_company can be an empty list; fall back to ''
        # instead of raising IndexError and aborting the whole page.
        developers = house_info['developer_company']
        company = developers[0] if developers else ''
        houseType = house_info['house_type']
        on_time = house_info['on_time']
        open_date = house_info['open_date']
        # e.g. 'tags': ['优惠楼盘', '人车分流'] — a list, stored as JSON text
        tags = json.dumps(house_info['tags'])
        totalPrice_range = json.dumps(house_info['reference_total_price'].split('-'))
        sale_status = house_info['process_status']
        # house_info['url'] is host-relative; rebuild the absolute link.
        detail_url = 'https://' + host + house_info['url']

        writeRow([
            title,
            cover,
            city,
            region,
            address,
            rooms_desc,
            area_range,
            all_ready,
            price,
            houseDecoration,
            company,
            houseType,
            on_time,
            open_date,
            tags,
            totalPrice_range,
            sale_status,
            detail_url
        ])

# Load the scraped CSV data into the database.
def save_to_sql():
    """Read houseInfoData.csv (skipping the header) and insert every row
    into the house_info table, one INSERT per row."""
    with open('houseInfoData.csv', 'r', encoding='utf-8') as src:
        rows = csv.reader(src)
        next(rows)  # skip the header line
        sql = '''
                insert into house_info(title,cover,city,region,address,rooms_desc,area_range,all_ready,price,houseDecoration,company,houseType,on_time,open_date,tags,totalPrice_range,sale_status,detail_url)
                values (
                    %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s
                )
            '''
        for record in rows:
            # the first 18 columns, in header order
            querys(sql, [record[i] for i in range(18)])

# Main entry point: crawl the house listings for each city url.
def main():
    """Crawl listing pages 1-3 for each city in cityUrlData.csv and append
    the parsed rows to houseInfoData.csv."""
    init()
    with open('./cityUrlData.csv', 'r', encoding='utf-8') as csv_rf:
        reader = csv.reader(csv_rf)
        # skip the csv header row
        next(reader)
        for city_data in reader:
            # crawl the first three listing pages of this city;
            # city_data[0] = city name, city_data[1] = scheme-less pg1 url
            for page in range(1, 4):
                url = 'https:' + re.sub('pg1', 'pg' + str(page), city_data[1])
                print('正在爬取 %s 城市的房屋数据，正在第 %s 页，路径为：%s' % (
                    city_data[0],
                    page,
                    url
                ))
                houseDetailList = getData(url)
                # BUG FIX: getData() returns None on a non-200 response;
                # parseData() would then iterate None and raise TypeError.
                if houseDetailList is not None:
                    parseData(houseDetailList, city_data[0], url)
            # NOTE(review): deliberate early exit — only the FIRST city is
            # crawled; remove this break to process every city in the file.
            break

# Script entry point — main() (the crawler) is currently disabled;
# only the CSV-to-database import step runs.
if __name__ == '__main__':
    # main()
    save_to_sql()