import json
import math
import time

import requests
import re
import os
import sys
import csv
import pymysql

from utils.query import querys


# CSV column order (Chinese spec from the original author):
# 楼盘名, 封面, 市区, 地区, 详情地址, 户型, 面积, 是否有预售证, 每平价格,
# 装修情况, 公司, 房屋类型, 交房时间, 开盘时间, 标签, 总价, 售房状态, 详情链接
_CSV_HEADER = [
    'title',
    'cover',
    'city',
    'region',
    'address',
    'rooms_desc',
    'area_range',
    'all_ready',
    'price',
    'hourse_decoration',
    'company',
    'house_type',
    'on_time',
    'open_date',
    'tags',
    'total_price_range',
    'sale_status',
    'detail_url',
]


def init():
    """Create ./hourseData.csv with the header row if it does not exist yet."""
    if os.path.exists('./hourseData.csv'):
        return
    with open('./hourseData.csv', 'w', encoding='utf-8', newline='') as fw:
        csv.writer(fw).writerow(_CSV_HEADER)

def writerRow(row):
    """Append a single data row to ./hourseData.csv."""
    with open('./hourseData.csv', 'a', encoding='utf-8', newline='') as out:
        csv.writer(out).writerow(row)


def getHtmlData(url):
    """Fetch one paginated listing page from the Lianjia JSON API.

    Returns a tuple (list_of_listings, total_count); ([], 0) on any failure.
    """
    try:
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36',
            'Cookie': 'lianjia_uuid=0ee865ab-5c6e-47e9-82bb-4f9ae2c5e5ef; _smt_uid=66af8107.1c74e0d6; _jzqc=1; _jzqy=1.1722777864.1722777864.1.jzqsr=baidu|jzqct=%E9%93%BE%E5%AE%B6.-; _jzqckmp=1; _ga=GA1.2.183484824.1722777866; _gid=GA1.2.2050070381.1722777866; Hm_lvt_9152f8221cb6243a53c83b956842be8a=1722777868; HMACCOUNT=79145C4E45B1C2C0; _jzqa=1.2083610611390549500.1722777864.1722777864.1722782111.2; _jzqx=1.1722782111.1722782111.1.jzqsr=gz%2Elianjia%2Ecom|jzqct=/ershoufang/rs/.-; sensorsdata2015jssdkcross=%7B%22distinct_id%22%3A%221911d90080e1106-0d95236398d12-26001e51-1395396-1911d90080f16ed%22%2C%22%24device_id%22%3A%221911d90080e1106-0d95236398d12-26001e51-1395396-1911d90080f16ed%22%2C%22props%22%3A%7B%22%24latest_traffic_source_type%22%3A%22%E7%9B%B4%E6%8E%A5%E6%B5%81%E9%87%8F%22%2C%22%24latest_referrer%22%3A%22%22%2C%22%24latest_referrer_host%22%3A%22%22%2C%22%24latest_search_keyword%22%3A%22%E6%9C%AA%E5%8F%96%E5%88%B0%E5%80%BC_%E7%9B%B4%E6%8E%A5%E6%89%93%E5%BC%80%22%2C%22%24latest_utm_source%22%3A%22baidu%22%2C%22%24latest_utm_medium%22%3A%22pinzhuan%22%2C%22%24latest_utm_campaign%22%3A%22wygz%22%2C%22%24latest_utm_content%22%3A%22biaotimiaoshu%22%2C%22%24latest_utm_term%22%3A%22biaoti%22%7D%7D; Hm_lpvt_9152f8221cb6243a53c83b956842be8a=1722784358; _ga_654P0WDKYN=GS1.2.1722782116.2.1.1722784371.0.0.0; _jzqa=1.2083610611390549500.1722777864.1722777864.1722782111.2; _jzqc=1; _ga_RCTBRFLNVS=GS1.2.1722784669.1.1.1722784676.0.0.0; _ga_6F76ZVFRYC=GS1.2.1722784379.1.1.1722785463.0.0.0; select_city=442000; lj_newh_session=eyJpdiI6Im5GZlRQNWdZN20yT1VwK0psRzNXYWc9PSIsInZhbHVlIjoiTWJsSm1zaG5RQ0gxSmxnbHp5azFobUo5bE8rZGhPNWc1Y3g4bXZ1UzVMMjNxdEtQb0JvVkc5d3FjNGJXQlwvWllIdXBwbXlGME5sRTh4b0M0SjdkRXpnPT0iLCJtYWMiOiJlNDYyNDZjMjQyOWE0YTI1YjczMmZiNzJkZDRlZDEyOWIwYzZjZGE5YzUyZTg5NmM0MzVkZTIyZWZlNDQyNmQzIn0%3D',
            # The API checks the Referer; strip the cache-buster query part.
            'Referer': url.split('?_t')[0]
        }
        # timeout so a stalled connection cannot hang the whole crawl
        resp = requests.get(url, headers=headers, timeout=15)
        if resp.status_code == 200:
            # parse the JSON payload exactly once (was parsed 3x before)
            data = resp.json()['data']
            return data['list'], data['total']
    except Exception as err:
        print(f'-- requestError: {err}')
    return [], 0


def parseHourseData(hourseDataList, city, url):
    """Normalize raw listing dicts and append each as a row to the CSV.

    hourseDataList: list of listing dicts from the Lianjia API.
    city: city name (kept for interface compatibility; not used here).
    url: the page URL the data came from, used to derive the site base URL.

    Rows that fail to parse are logged and skipped (best-effort ingestion).
    """
    # loop-invariant: site root, e.g. 'https://bd.fang.lianjia.com'
    baseUrl = url.split('/loupan')[0]

    for hourse in hourseDataList:
        try:
            # '2/3/4居' -> ["2", "3", "4"] (JSON-encoded for the CSV cell)
            roomsDesc = json.dumps(hourse['frame_rooms_desc'].replace('居', '').split('/'))

            # '82㎡/140㎡' -> ["82", "140"]
            areaRange = json.dumps(hourse['resblock_frame_area_range'].replace('㎡', '').split('/'))

            # e.g. ["车位充足","绿化率高","温泉入户","贴心物业"]
            tags = json.dumps(hourse['tags'])

            # '120-175' -> ["120", "175"]
            totalPrice = json.dumps(hourse['reference_total_price'].split('-'))

            # hourse['url'] is site-relative, e.g. '/loupan/p_hhwxhtbljjq/'
            detailUrl = baseUrl + hourse['url']

            # Column order must match the header written by init().
            writerRow([
                hourse['title'],
                hourse['cover_pic'],
                hourse['city_name'],
                hourse['district'],
                hourse['address'],
                roomsDesc,
                areaRange,
                hourse['permit_all_ready'],
                hourse['average_price'],
                hourse['decoration'],
                hourse['developer_company'][0],
                hourse['house_type'],
                hourse['on_time'],
                hourse['open_date'],
                tags,
                totalPrice,
                hourse['process_status'],
                detailUrl
            ])
        except Exception as err:
            # best-effort: one malformed listing must not abort the page
            print(f'---- writeErr: {err}')
            continue
    return



def saveToMysql():
    """Load every data row of ./hourseData.csv into the hourse_info table.

    Skips the header row; failed inserts are logged and do not stop the load.
    """
    # loop-invariant statement: build it once, not once per row
    sql = '''
        INSERT INTO hourse_info(title, cover, city, region, address, rooms_desc, area_range, all_ready, price, hourse_decoration, company, hourse_type, on_time, open_date, tags, total_price_range, sale_status, detail_url)
        VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
    '''
    with open('./hourseData.csv', 'r', encoding='utf-8') as fr:
        reader = csv.reader(fr)
        next(reader)  # skip header
        for hourse in reader:
            print(hourse)
            try:
                # parameterized query; values are bound, never interpolated
                querys(sql, hourse)
            except Exception as err:
                print(f'-- saveErr: {err}')



def main():
    """Crawl every city listed in ./cityData.csv and stage rows into the CSV.

    cityData.csv rows are expected as [city_name, pg1_url_without_scheme].
    Pagination: the API reports a total count; pages hold 10 listings each.
    """
    init()

    with open('./cityData.csv', 'r', encoding='utf-8') as fr:
        reader = csv.reader(fr)
        next(reader)  # skip header
        for city in reader:
            # skip this city (kept from original author's run state)
            if city[0] == '包头':
                print('---------------------------------')
                continue
            currentPage = 1
            totalPage = 1
            while currentPage <= totalPage:
                try:
                    # e.g. https://zs.fang.lianjia.com/loupan/pg1/?_t=1/
                    url = 'https:' + city[1].replace('pg1', 'pg' + str(currentPage))
                    print(f'-- 正在采集[ {city[0]} ]城市的第{currentPage}页的房屋数据, url: {url}')
                    hoursePageDataList, hourseTotalData = getHtmlData(url)
                    totalPage = math.ceil(int(hourseTotalData) / 10)
                    print(currentPage + 1, totalPage)
                    parseHourseData(hoursePageDataList, city[0], url)
                except Exception as err:
                    print(f'--- 采集异常: {err}')
                finally:
                    # BUGFIX: increment unconditionally. Previously an
                    # exception skipped `currentPage += 1`, so the same
                    # page was retried forever (infinite loop).
                    currentPage += 1
                time.sleep(0.01)  # be polite between pages
            time.sleep(1)  # pause between cities
    return


if __name__ == '__main__':
    # Stage 1 (disabled): crawl the web data into the CSV.
    # main()
    # Stage 2: load the staged CSV into MySQL.
    saveToMysql()