# -*- coding: utf-8 -*-
"""
Created on Tue Sep  6 16:05:59 2016

@author: chenwr
"""

import codecs
import re
import time
from functools import reduce
from multiprocessing import Pool

import numpy as np
import pandas as pd
import requests
from bs4 import BeautifulSoup

import cx_Oracle


def url_open(url):
    """Fetch *url* with browser-like headers and return the body as text.

    Retries forever (1-second pause between attempts) on network-level
    failures, so this call blocks until the page is retrieved.

    :param url: absolute URL of the page to download.
    :return: decoded response body (str).
    """
    headers = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'Accept-Encoding': 'gzip, deflate, br',
        'Accept-Language': 'zh-CN,zh;q=0.8',
        'Cache-Control': 'max-age=0',
        'Connection': 'keep-alive',
        'Cookie': 'select_city=350200; lianjia_uuid=af9ec94f-c262-4479-87fe-bae33a973984; UM_distinctid=15fc24977a8912-0ceeebd236ea8-5d153b16-15f900-15fc24977a9a45; all-lj=eae2e4b99b3cdec6662e8d55df89179a; Hm_lvt_9152f8221cb6243a53c83b956842be8a=1510793116,1510793631; Hm_lpvt_9152f8221cb6243a53c83b956842be8a=1510794363; _smt_uid=5a0cdf9b.52d4b67b; CNZZDATA1255847100=477360082-1510791356-%7C1510791356; CNZZDATA1254525948=367678637-1510790754-%7C1510790754; CNZZDATA1255633284=1024567459-1510792602-%7C1510792602; CNZZDATA1255604082=1214039791-1510789438-%7C1510789438; _ga=GA1.2.973757450.1510793121; _gid=GA1.2.782279088.1510793121; lianjia_ssid=39d151c0-84a0-4b13-b468-b54f55c717f6',
        'Host': 'xm.lianjia.com',
        'Upgrade-Insecure-Requests': '1',
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.79 Safari/537.36'
    }
    while True:
        try:
            req = requests.get(url=url, headers=headers, timeout=3)
            break
        except requests.RequestException:
            # Narrowed from a bare ``except:`` — only retry on network
            # errors/timeouts; programming errors now propagate instead of
            # looping forever.
            print('timeout')
            time.sleep(1)
    return req.text


def get_urls(url):
    """Return the detail-page link of every house on one listing page.

    :param url: URL of a search-result (listing index) page.
    :return: list of href strings, one per house card on the page.
    """
    print(url)
    page = BeautifulSoup(url_open(url), 'html.parser')
    # Each house card sits in a <div class="info clear">; its first <a>
    # links to the detail page.
    links = []
    for card in page('div', {'class': 'info clear'}):
        links.append(card.find('a')['href'])
    return links


def get_infos(url):
    """Scrape one house detail page and insert a row into HOUSE_TAB.

    Extracts district/estate/price/layout attributes from the page, maps
    them onto the fixed HOUSE_TAB column list (missing attributes become
    empty strings) and inserts the row using bind variables.

    :param url: URL of a single house detail page.
    """
    print(url)
    html = url_open(url)
    soup = BeautifulSoup(html, 'html.parser')

    house_info = {}
    house_info['网址'] = url
    house_info['日期'] = time.strftime('%Y-%m-%d', time.localtime())
    soup_area = soup('div', {'class': 'areaName'})[0]('a')
    house_info['辖区'] = soup_area[0].string
    house_info['区域'] = soup_area[1].string
    house_info['小区'] = soup('a', {'class': 'info'})[0].string
    # Coordinates live in an inline JS variable, not in the DOM, so they
    # are pulled from the raw HTML with a regex.
    position = re.findall(r"resblockPosition:'(.+),(.+)'", html)[0]
    house_info['经度'] = position[0]
    house_info['纬度'] = position[1]
    house_info['总价'] = soup('span', {'class': 'total'})[0].string
    house_info['均价'] = soup('span', {'class': 'unitPriceValue'})[0].contents[0]
    house_info['建造年份'] = soup('div', {'class': 'area'})[0](
        'div', {'class': 'subInfo'})[0].string.split('/')[0]
    # The 'base' and 'transaction' panels are key/value <li> lists.
    for name in ['base', 'transaction']:
        for li in soup('div', {'class': name})[0]('li'):
            # Guard: .string is None for <li> children that contain nested
            # tags — the original crashed with AttributeError there.
            contents = [content.string.strip()
                        for content in li.contents
                        if content.string and content.string.strip()]
            if len(contents) >= 2:
                house_info[contents[0]] = contents[1]
    labels = [
        '上次交易', '交易权属', '产权年限', '产权所属', '区域', '均价',
        '套内面积', '小区', '建筑类型', '建筑结构', '建筑面积', '建造年份',
        '总价', '户型结构', '房屋年限', '房屋户型', '房屋朝向', '房屋用途',
        '房本备件', '房源编码', '所在楼层', '抵押信息', '挂牌时间', '梯户比例',
        '纬度', '经度', '装修情况', '辖区', '配备电梯', '网址',
        '日期'
    ]
    # Missing attributes default to '' so the row always has len(labels) values.
    data = [house_info.get(key, '') for key in labels]
    conn = cx_Oracle.connect('guest', '111111', 'localhost:1521/XE')
    try:
        cur = conn.cursor()
        # Bind variables instead of string interpolation: scraped values may
        # contain quotes, which previously broke the statement (and made it
        # injectable).
        placeholders = ','.join(':%d' % (i + 1) for i in range(len(labels)))
        cur.execute('insert into HOUSE_TAB(%s) values(%s)' %
                    (','.join(labels), placeholders), data)
        cur.close()
        conn.commit()
    finally:
        # Close the connection even when parsing/insert fails mid-way.
        conn.close()


def download_house():
    """Scrape today's second-hand listings and store the new ones in Oracle.

    Collects detail-page URLs from the first 100 result pages in parallel,
    drops URLs already recorded today in HOUSE_TAB, then fetches and stores
    the remaining listings, also in parallel.
    """
    # Result pages 1..100 of the Xiamen second-hand listing index.
    page_urls = ['http://xm.lianjia.com/ershoufang/pg%d/' %
                 page for page in range(1, 101)]
    pool = Pool()
    house_urls = pool.map(get_urls, page_urls)
    pool.close()    # stop accepting new tasks
    pool.join()     # wait for all workers to finish
    # Flatten + de-duplicate in one pass; the original reduce-based list
    # concatenation was O(n^2).
    urls = {url for page in house_urls for url in page}

    # Skip listings already stored today.
    conn = cx_Oracle.connect('guest', '111111', 'localhost:1521/XE')
    cur = conn.cursor()
    cur.execute(
        "SELECT 网址 FROM HOUSE_TAB WHERE 日期 = TO_CHAR(CURRENT_DATE, 'yyyy-mm-dd')")
    # Set membership: the original tested `url not in urls_` against a list,
    # which is O(n) per lookup.
    seen = {row[0] for row in cur.fetchall()}
    cur.close()
    conn.close()
    urls = [url for url in urls if url not in seen]
    print(len(urls))
    if not urls:
        return None

    # Fetch and store the remaining listings in parallel.
    pool = Pool()
    pool.map(get_infos, urls)
    pool.close()    # stop accepting new tasks
    pool.join()     # wait for all workers to finish


def write_html():
    """Render the latest day's prices into the map HTML page.

    Reads the most recent day's rows from HOUSE_TAB, aggregates listings
    per coordinate/estate, serializes them as a JavaScript ``points``
    array and splices that into the template file, writing a dated copy
    under the html directory.
    """
    # NOTE(review): hard-coded credentials, duplicated across functions.
    conn = cx_Oracle.connect('guest', '111111', 'localhost:1521/XE')
    # Only the latest scrape date (日期) is rendered.
    df = pd.read_sql(
        'select 经度,纬度,小区,房屋户型,建筑面积,总价,均价,网址,日期 from house_tab where 日期 = (SELECT max(日期) from house_tab)',
        conn
    )
    conn.close()

    df.drop_duplicates(inplace=True)
    # 地址: "lng,lat,'estate'" — doubles as the group key, so listings at
    # the same coordinates/estate are merged into one map point.
    df['地址'] = df[['经度', '纬度', '小区']].apply(
        lambda x: "%s,%s,'%s'" % (x['经度'], x['纬度'], x['小区']), axis=1)
    # 信息: tab-separated "layout  area  total-price万" label per listing.
    df['信息'] = df[['房屋户型', '建筑面积', '总价']].apply(
        lambda x: '%s\t%s\t%s万' % (x['房屋户型'], x['建筑面积'], x['总价']), axis=1)
    df['均价'] = df['均价'].map(float)
    # Per point: mean unit price; 信息/网址 collapse to comma-joined,
    # single-quoted JS string lists.
    df_house = df[['地址', '均价', '信息', '网址']].groupby('地址').agg({
        '均价': np.mean,
        '信息': lambda x: ','.join(map(lambda y: "'%s'" % y, x)),
        '网址': lambda x: ','.join(map(lambda y: "'%s'" % y, x)),
    }).reset_index()
    df_house['均价'] = df_house['均价'].map(int)
    df_house.sort_values(by='均价', ascending=False, inplace=True)
    # One JS array literal per point: [lng,lat,'estate',price,[infos],[urls]].
    price_df = df_house.apply(lambda x: "[%s,%s,[%s],[%s]]" % (
        x['地址'], x['均价'], x['信息'], x['网址']), axis=1)

    # JS preamble consumed by the template's map script.
    data_str = 'var points = [' + ','.join(price_df) + '];\n' + \
        '\t\tvar price_min = ' + str(df_house['均价'].min()) + ';\n' + \
        '\t\tvar price_max = ' + str(df_house['均价'].max()) + ';\n'
    # Template must contain the literal marker '// 值写入' where the data
    # block is injected. Windows-style relative paths — TODO confirm this
    # script only runs on Windows.
    with open(r'.\厦门房价.html', 'r', encoding='utf-8') as f:
        s = f.read()
    with codecs.open(r'.\html\厦门房价%s.html' % df['日期'].values[0], 'w', 'utf-8') as f:
        f.write(s.replace('// 值写入', data_str))


if __name__ == '__main__':
    # Scrape today's listings into Oracle, then render the price-map page.
    download_house()
    write_html()
