#!/usr/bin/env python
# coding=utf-8
# author: Zeng YueTian
# 获得指定城市的所有新房楼盘数据


import math
from lib.utility.date import *
from lib.utility.path import *
from lib.url.xiaoqu import *
from lib.city.city import *
from lib.city.loupan import *
import  re
import json
from lib.utility.version import PYTHON_3


def collect_city_loupan(city_name, fmt="csv"):
    """
    Crawl and persist the new-house (loupan) data of one city, by default
    into a CSV file under today's data directory.

    :param city_name: city short name used in the fang.com sub-domain
    :param fmt: output format; only "csv" is currently supported
    :return: None
    """
    global total_num, today_path
    csv_file = "{0}/{1}.csv".format(today_path, city_name)
    with open(csv_file, "w", encoding='utf-8') as out:
        # fetch every loupan record for the city in one go
        loupans = get_loupan_info(city_name)
        total_num = len(loupans)
        if fmt == "csv":
            for item in loupans:
                # one line per loupan, prefixed with the crawl date
                out.write("{0},{1}\n".format(date_string, item.text()))
    print("Finish crawl: " + city_name + ", save data to : " + csv_file)

def huxingInfo(url):
    """
    Fetch a single floor-plan (huxing) detail page and summarise it.

    :param url: floor-plan detail page url
    :return: a "户型:..." summary with attributes joined by '|', or
             '无在售户型' when the page cannot be fetched or parsed
    """
    print(url)
    page = url
    try:
        response = requests.get(page, timeout=10)
        response.encoding = 'gbk'  # fang.com serves gbk-encoded pages
        html = response.text
    except Exception as e:
        print(e)
        return '无在售户型'
    soup = BeautifulSoup(html, "lxml")
    huxing_messsage = soup.find('div', class_='huxing_messsage')
    if huxing_messsage is None:
        # BUGFIX: previously fell off the end and returned None, which made
        # callers crash when concatenating the result to a string
        return '无在售户型'
    hxName = '户型:' + huxing_messsage.find('strong', id='hxName').get_text()
    information_li = huxing_messsage.find_all('div', class_='information_li')
    # keep at most the first 5 attribute blocks (area, orientation, ...)
    for i in range(min(len(information_li), 5)):
        hxName = hxName + '|' + information_li[i].get_text()
    # strip layout whitespace; keep '\xa0' as a plain space separator
    return hxName.replace('\n', '').replace(' ', '').replace('\t', '').replace('\xa0', ' ')
def getHuxing(url):
    """
    Fetch the floor-plan listing page and collect a summary for every plan.

    :param url: floor-plan list page url
    :return: space-separated plan summaries, or '无在售户型' on fetch failure
    """
    print(url)
    page = url
    try:
        response = requests.get(page, timeout=10)
        response.encoding = 'gbk'  # fang.com serves gbk-encoded pages
        html = response.text
    except Exception as e:
        print(e)
        return '无在售户型'
    soup = BeautifulSoup(html, "lxml")
    entries = soup.find_all('p', class_='tiaojian')
    parts = []
    for entry in entries:
        # each entry links to a detail page carrying the full description
        if entry.a is not None and entry.a.get('href') is not None:
            parts.append(huxingInfo(entry.a['href']))
    # BUGFIX: the old last-element check used `i == len(listnew)`, which is
    # never true inside range(len(listnew)), so every element (including the
    # last) got a trailing space; joining gives the intended separator layout.
    # Also renamed the accumulator, which shadowed the builtin `list`.
    return ' '.join(parts)
def get_xiaoqu_info(urlNmae):
    """
    Fetch one loupan detail page and build a comma-separated summary of
    its floor plans, price trend and latest news item.

    :param urlNmae: loupan detail page url
    :return: summary string, or '无在售户型' on fetch failure
    """
    page = urlNmae
    try:
        response = requests.get(page, timeout=10)
        response.encoding = 'gbk'  # fang.com serves gbk-encoded pages
        html = response.text
    except Exception as e:
        print(e)
        return '无在售户型'
    soup = BeautifulSoup(html, "lxml")
    allhuxing = ''
    # --- floor plans: follow the "more" link of the huxing section ---
    gengduo = soup.find('div', class_='huxing')
    if gengduo is not None:
        # BUGFIX: guard the nested lookups; the old chained
        # .find(...).find_all('a') raised AttributeError when 'bigtit' was absent
        bigtit = gengduo.find('div', class_='bigtit')
        anchors = bigtit.find_all('a') if bigtit is not None else []
        if anchors:
            # the last anchor is the "more floor plans" link
            allhuxing = getHuxing(anchors[-1]['href'])
    # --- price trend: embedded as a JSON chart config inside a <script> ---
    zoushi = soup.find('div', class_='fjzs')
    if zoushi is None or zoushi.script is None or zoushi.script.string is None:
        zoushi = '走势待定'
    else:
        raw = zoushi.script.string.replace('\n', '').replace(' ', '').encode('utf-8').decode('unicode-escape')
        # grab the outermost {...} object literal from the script text
        matcher = re.search(r"{.*}", raw)
        data = None
        if matcher is not None:
            # BUGFIX: the old code called matcher.group(0) / json.loads
            # unconditionally and crashed when the pattern did not match
            # or the payload was not valid JSON
            try:
                data = json.loads(matcher.group(0))
            except ValueError:
                data = None
        if data is None:
            # NOTE(review): as in the original code, this placeholder is
            # never appended to the result — confirm whether it should be
            zoushi = '走势待定'
        else:
            series = data['series']
            descTmp = ''
            for entry in series:
                descData = entry['data']
                descTmp = descTmp + entry['name'] + '  '
                if descData is not None:
                    count = 0
                    for point in descData:
                        # point is [label, price]; skip null prices
                        if point[1] is not None:
                            count = count + 1
                            descTmp = descTmp + str(point[0]) + ':' + str(point[1]) + '元  '
                    if count <= 0:
                        descTmp = descTmp + '无,'
                    else:
                        descTmp = descTmp + ','
            allhuxing = allhuxing + ',' + descTmp
    # --- latest news item ---
    dongtai = soup.find('div', class_='clearfix lpStory')
    if dongtai is not None:
        dongtai = dongtai.find('dd')
    # BUGFIX: guard .p.a.contents too; the old code assumed <p><a> always
    # existed inside <dd> and raised AttributeError otherwise
    if dongtai is not None and dongtai.p is not None and dongtai.p.a is not None and dongtai.p.a.contents:
        dongtai = dongtai.p.a.contents[0]
    else:
        dongtai = '暂无动态'
    allhuxing = allhuxing + ',' + dongtai
    return allhuxing
def get_loupan_info(city_name):
    """
    Crawl the fang.com listing pages and collect every new-house loupan
    of a city.

    :param city_name: city short name used in the fang.com sub-domain
    :return: list of LouPan objects (empty list on failure)
    """
    loupan_list = list()
    page = 'http://newhouse.{0}.fang.com/house/s/'.format(city_name)
    try:
        response = requests.get(page, timeout=10)
        response.encoding = 'gbk'  # fang.com serves gbk-encoded pages
        html = response.text
    except Exception as e:
        print(e)
        # BUGFIX: return an empty list instead of '' so callers that call
        # len() and iterate always receive the documented list type
        return loupan_list
    soup = BeautifulSoup(html, "lxml")
    # total loupan count is shown in the pager; 20 entries per page
    try:
        page_box = soup.find_all('div', class_="page")[0]
        matches = page_box.ul.li.strong.string
        print(matches)
        total_page = math.ceil(int(matches) / 20)
    except Exception:
        # pager missing -> only a single results page
        total_page = 1
    # walk every result page, first to last
    for i in range(1, total_page + 1):
        page = 'http://newhouse.{0}.fang.com/house/s/b9{1}'.format(city_name, i)
        print(page)
        try:
            response = requests.get(page, timeout=10)
            response.encoding = 'gbk'
            html = response.text
        except Exception as e:
            print(e)
            break
        soup = BeautifulSoup(html, "lxml")
        # each loupan lives in its own details panel
        house_elements = soup.find_all('div', class_="nlc_details")
        for house_elem in house_elements:
            price = house_elem.find('div', class_="nhouse_price")
            price = '价格待定' if price is None else price.span.string
            # sale status
            total = house_elem.find('div', class_="fangyuan")
            total = '无在售信息' if total is None else total.span.string
            xiaoqu = house_elem.find('div', class_='nlcd_name')
            huxing = '无在售户型'
            if xiaoqu is None:
                xiaoqu = '待定'
            else:
                # follow the detail link for floor plans / trend / news
                link = xiaoqu.a['href']
                xiaoqu = xiaoqu.a.string
                huxing = get_xiaoqu_info(link)
            address = house_elem.find('div', class_='address')
            address = '待定' if address is None else address.a.string
            # keep the record as an object
            loupan_list.append(LouPan(xiaoqu, price, total, address, huxing))
    return loupan_list


# -------------------------------
# script entry point
# -------------------------------
if __name__ == "__main__":
    # build the city-selection prompt (interactive input is disabled;
    # the city is hard-coded below)
    prompt = create_prompt_text()
    city = 'nc'

    total_num = 0

    # crawled data is stored under a per-date directory
    date_string = get_date_string()
    print('Today date is: %s' % date_string)
    today_path = create_date_path("loupan", city, date_string)

    start = time.time()   # start timing
    collect_city_loupan(city)
    end = time.time()     # stop timing and report

    print("Total crawl {0} loupan.".format(total_num))
    print("Total cost {0} second ".format(end - start))