import csv
import re
import os
import requests
from pymysql import *
import os
import sys
current_dir = os.path.dirname(os.path.abspath(__file__))
parent_dir = os.path.dirname(current_dir)
sys.path.append(parent_dir)

from utils.query import querys
import json
# 数据表house_info结构如下
# 房名 市区 地区 详情地址 房型详情 建面 是否具有预售证 每平价格 房屋的装修情况（毛坯，简装修）
# 公司 房屋类型（别墅） 交房时间 开盘时间 标签 总价区间 售房情况（在售）  详情链接
# Create the CSV output file with its header row, unless it already exists.
def init():
    """Initialize ./houseInfoData.csv with the 18-column header (idempotent)."""
    if os.path.exists('./houseInfoData.csv'):
        return  # already initialized; never overwrite scraped data
    header = [
        'title', 'cover', 'city', 'region', 'address', 'rooms_desc',
        'area_range', 'all_ready', 'price', 'houseDecoration', 'company',
        'houseType', 'on_time', 'open_date', 'tags', 'totalPrice_range',
        'sale_status', 'detail_url',
    ]
    with open("./houseInfoData.csv", "w", encoding="UTF-8", newline="") as csvfile:
        csv.writer(csvfile).writerow(header)

# 保存数据到MySQL数据库表中
# 保存数据到MySQL数据库表中
def save_to_sql():
    """Load every data row of ./houseInfoData.csv into the house_info table.

    Reads the CSV written by the scraper (skipping the header row written by
    init()) and runs one parameterized INSERT per row through the shared
    querys() helper. Parameterized placeholders (%s) keep the values out of
    the SQL string itself.
    """
    with open('./houseInfoData.csv', 'r', encoding='utf-8') as reader:
        readerCsv = csv.reader(reader)
        next(readerCsv)  # skip the header row
        for h in readerCsv:
            querys('''
                insert into house_info(title,cover,city,region,address,rooms_desc,area_range,all_ready,price,houseDecoration,company,houseType,on_time,open_date,tags,totalPrice_range,sale_status,detail_url)
                values(
                    %s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s
                )
            ''', h[:18])  # exactly the 18 CSV columns, in header order
            print('已经成功将该条数据插入！')
    print('恭喜您！已经将数据全部导入MySQL数据表！')

# 写入数据到CSV文件
# Append one record to the CSV output file.
def writerRow(rowData):
    """Append rowData as a single row to ./houseInfoData.csv."""
    with open("./houseInfoData.csv", "a", encoding="UTF-8", newline="") as out:
        csv.writer(out).writerow(rowData)

# 对数据进行分析
# 对数据进行分析
def parse_data(houseDataList, city, url):
    """Extract the fields of interest from each raw house dict and append
    one CSV row per house via writerRow().

    houseDataList -- list of house dicts from the lianjia JSON API, or
                     None/empty when the fetch failed (then nothing is written).
    city          -- city name, stored verbatim in the 'city' column.
    url           -- the page-1 listing URL; its host builds the detail link.
    """
    if not houseDataList:
        # get_data() returns None on a non-200 response; skip this page
        # instead of raising a TypeError on iteration.
        return
    # Host part of the listing URL (e.g. 'bj.fang.lianjia.com').
    # Loop-invariant, so computed once. Raw string so that '\?' is a regex
    # escape rather than an invalid Python string escape (SyntaxWarning).
    host = re.search(r'//(.*)/loupan/pg1/\?_t=1', url).group(1)
    for houseInfo in houseDataList:
        title = houseInfo['title']
        cover = houseInfo['cover_pic']
        region = houseInfo['district']
        address = houseInfo['address']
        # e.g. '3居/4居' -> '["3", "4"]' (stored as a JSON string)
        rooms_desc = json.dumps(houseInfo['frame_rooms_desc'].replace('居', '').split('/'))
        # e.g. '80-120㎡' -> '["80", "120"]' (stored as a JSON string)
        area_range = json.dumps(houseInfo['resblock_frame_area_range'].replace('㎡', '').split('-'))
        all_ready = houseInfo['permit_all_ready']
        price = houseInfo['average_price']
        houseDecoration = houseInfo['decoration']
        company = houseInfo['developer_company'][0]  # first listed developer
        houseType = houseInfo['house_type']
        on_time = houseInfo['on_time']
        open_date = houseInfo['open_date']
        tags = json.dumps(houseInfo['tags'])
        totalPrice_range = json.dumps(houseInfo['reference_total_price'].split('-'))
        sale_status = houseInfo['process_status']
        # houseInfo['url'] is a path; prefix it with the scheme and host.
        detail_url = 'https://' + host + houseInfo['url']
        writerRow([title, cover, city, region, address, rooms_desc, area_range,
                   all_ready, price, houseDecoration, company, houseType,
                   on_time, open_date, tags, totalPrice_range, sale_status,
                   detail_url])

def get_data(url):
    """Fetch one listing page and return its list of raw house dicts.

    Returns response.json()['data']['list'] on HTTP 200, otherwise None.
    NOTE(review): the hard-coded cookie is session-bound and will expire;
    replace it with your own before running.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3',
        # Request header
        'Referer': 'https://bj.fang.lianjia.com/loupan/pg2/',
        # Supply your own cookie. The value was split across two physical
        # lines in the original source (a SyntaxError); adjacent string
        # literals rejoin it into one cookie header unchanged.
        'cookie': 'lianjia_uuid=75d60c76-ce9b-4101-ac09-57ac4f563814; _smt_uid=649be54d.ac72699; _jzqa=1.4022756256930896000.1687938382.1687938382.1687938382.1; _jzqy=1.1687938382.1687938382.1.jzqsr=baidu.-; _jzqckmp=1; sensorsdata2015jssdkcross=%7B%22distinct_id%22%3A%2218900f7b917a16-060e71bafc7b5a-26031f51-2073600-18900f7b918ef0%22%2C%22%24device_id%22%3A%2218900f7b917a16-060e71bafc7b5a-26031f51-2073600-18900f7b918ef0%22%2C%22props%22%3A%7B%22%24latest_traffic_source_type%22%3A%22%E8%87%AA%E7%84%B6%E6%90%9C%E7%B4%A2%E6%B5%81%E9%87%8F%22%2C%22%24latest_referrer%22%3A%22https%3A%2F%2Fwww.baidu.com%2Flink%22%2C%22%24latest_referrer_host%22%3A%22www.baidu.com%22%2C%22%24latest_search_keyword%22%3A%22%E6%9C%AA%E5%8F%96%E5%88%B0%E5%80%BC%22%7D%7D; _ga=GA1.2.1631881632.1687938384; _gid=GA1.2.1237086536.1687938384; _ga_4JBJY7Y7MX=GS1.2.1687938383.1.0.1687938383.0.0.0; _ga_6DHGZS4SHY=GS1.2.1687938386.1.1.1687939163.0.0.0; _jzqx=1.1687944603.1687944603.1.jzqsr=xianyang%2Efang%2Elianjia%2Ecom|jzqct=/loupan/pg2/.-; lianjia_ssid=6bd424c5-da6f-44ef-a090-5b3a21e6bd1c; select_city=110000; lj_newh_session=eyJpdiI6IkU4N1JkWkdcL1luSDFPbDlxeGhkTFZnPT0iLCJ2YWx1ZSI6Iit6ekZ2bURuQ3ZQYUEwM2xpTm9Eb3V6cDlNVEFnRVB6NERudG9lc1hcLzZlZllFNlU2K0psM2Q0bHNFNTQwcERTcW43ZzlKQlROUkgxNFJONkVNOHVYQT09IiwibWFjIjoiYWM4ZTllMWM2MTRkN2MzNGQ2NDVmN2Q4NzUzNTQ1ZDcwNjM3NjE4N2EzYTNiOTdiNTQ3YTA5ZDQyZDA0OGJkNyJ9; digData=%7B%22key%22%3A%22loupan_index%22%7D; _gat=1; _gat_global=1; _gat_new_global=1; _gat_dianpu_agent=1; '
                  'srcid=eyJ0IjoiXCJ7XFxcImRhdGFcXFwiOlxcXCI2Zjk1ODVhYWIyOWRiNWQzOGNkMjE3MjEzMzI4MmExZDg4NWY4MmQwNThmMDY1MWQyNzdiYWUxOTI1YjkwZmU1NjdhNjU3MWVkYmJhMjQ2YTQyZjg1NDljMDljZTdlZWIxNGJhN2Y2Zjc2MzQ0NGEwZmQ2YmRiZTFiNmIyYjhmZjVjNzU3ZGJjOTA1MzZhNzE3MTczMTZlM2YxZWM3NjAyZjAxYjgyOGQ0ZmViMTg1MzZmNTUxZmJkOTI3NGViYTExYzFkN2Y0NzU0MzExMmY4ZmQ2NDM0MDhhMGExZmE2OGEyMzZkZWFmNGU1ZTFjZmYzNDQ4NjIxZGI0MTFjMmFlXFxcIixcXFwia2V5X2lkXFxcIjpcXFwiMVxcXCIsXFxcInNpZ25cXFwiOlxcXCJlNzA1YTMxOVxcXCJ9XCIiLCJyIjoiaHR0cHM6Ly9iai5mYW5nLmxpYW5qaWEuY29tL2xvdXBhbi8iLCJvcyI6IndlYiIsInYiOiIwLjEifQ==; _jzqa=1.4022756256930896000.1687938382.1687938382.1688018203.2; _jzqc=1; _qzja=1.1570753206.1687938414368.1687938414368.1688018202885.1687940863834.1688018202885.0.0.0.15.2; _qzjc=1; _qzjto=1.1.0; _ga_RCTBRFLNVS=GS1.2.1688018203.2.0.1688018203.0.0.0; _jzqb=1.1.10.1688018203.1; _qzjb=1.1688018202885.1.0.0.0'
    }
    response = requests.get(url, headers=headers)       # fetch the page
    if response.status_code == 200:
        return response.json()['data']['list']
    else:
        return None


def main():
    """Crawl pages 1-9 of every city listed in ./cityData.csv.

    cityData.csv rows are expected as: city[0] = city name,
    city[1] = protocol-relative page-1 listing URL containing 'pg1'.
    Any error while scraping a city skips the rest of that city and
    moves on to the next one.
    """
    init()
    with open('./cityData.csv', 'r', encoding="UTF-8", ) as readerF:
        reader = csv.reader(readerF)
        next(reader)  # skip the header row
        for city in reader:
            try:
                for page in range(1, 10):
                    # Build the absolute URL for this page number once,
                    # instead of duplicating the re.sub for print and fetch.
                    page_url = 'https:' + re.sub('pg1', 'pg' + str(page), city[1])
                    print('正在爬取 %s 城市的房屋数据正在第 %s 页 路径为：%s' % (
                        city[0], page, page_url))
                    houseDataList = get_data(page_url)
                    parse_data(houseDataList, city[0], city[1])
            except Exception:
                # Best-effort per city; a bare 'except:' would also swallow
                # KeyboardInterrupt/SystemExit and make the crawl unstoppable.
                continue

if __name__ == "__main__":
    # Run exactly one of the functions below
    main()      # Step 1: start the crawler to scrape the data
    # save_to_sql()  # Step 2: save the scraped data into the MySQL table

