# encoding:utf-8
import sys, requests, re, uuid, os
sys.path.append('..')
import lxml.etree as etree
from urllib import parse
from random import randint
from six.moves.urllib.parse import urlparse
from user_agent import generate_user_agent
from spider.OSS import upload_to_oss
session = requests.Session()
# from model.hourse_model import HousePictureModel, HouseInfoModel
# from model import DAO, DBSession
# from resource_pool import export_lianjia_to_mongo


def random_requests_header():
    """Build an HTTP header dict with mildly randomized q-values and a random UA.

    Randomizing the Accept/Accept-Language/Accept-Charset quality factors and
    the User-Agent makes successive requests look less like a single bot.
    """
    accept = ('text/xml,application/xml,application/xhtml+xml'
              ',text/html;q=0.9,text/plain;q=0.8,image/png,*/*;q=0.%d'
              % randint(2, 5))
    headers = {
        'Accept': accept,
        'Accept-Language': 'en-us,en;q=0.%d' % (randint(5, 9)),
        'Accept-Charset': 'utf-8,windows-1251;q=0.7,*;q=0.%d' % randint(5, 7),
        'User-Agent': generate_user_agent(),
        'Connection': 'keep-alive',
    }
    return headers


def __vaild_proxy(url, ip, port):
    """Probe *url* through the http proxy ``ip:port``.

    Returns True only when the proxied GET answers 200 within 2 seconds;
    any timeout / connection failure is logged and treated as invalid.
    """
    proxies = {"http": 'http://{}:{}'.format(ip, port)}
    headers = random_requests_header()
    headers['Host'] = urlparse(url).netloc
    resp = None
    try:
        # verify=False: proxy endpoints often present self-signed certs.
        resp = requests.get(url, headers=headers, timeout=2,
                            proxies=proxies, verify=False)
    except (requests.exceptions.ReadTimeout,
            requests.exceptions.ConnectionError,
            AttributeError,
            ConnectionAbortedError) as e:
        print('Excepitons are raised when crawling {}.Here are details:{}'.format(url, e))
    return bool(resp and resp.status_code == 200)


def generate_header():
    """Return the fixed desktop-browser headers used for bj.lianjia.com."""
    return {
        'Host': 'bj.lianjia.com',
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:61.0) Gecko/20100101 Firefox/61.0',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
        'Accept-Encoding': 'gzip, deflate, br',
        'Connection': 'keep-alive',
        'Upgrade-Insecure-Requests': '1',
        'Pragma': 'no-cache',
        'Cache-Control': 'no-cache',
    }


def spider_page(link):
    """GET *link* through the shared session; return decoded HTML or None on failure."""
    response = session.get(link, headers=generate_header())
    if not (response and response.status_code == 200):
        return None
    return response.content.decode('utf-8')


def get_house_links(html):
    """Extract house detail ids from a list-page HTML.

    The page embeds the ids in a JS snippet like ``ids: '101,102,103'``.
    Returns the ids as a list of strings, or None when the pattern is absent.
    """
    match = re.search(r"ids: '(.*?)'", html)
    if match is None:
        return None
    return match.group(1).split(',')


def _xpath_first(tree, path):
    """Return the first node/text matched by *path* on *tree*, or None if no match."""
    nodes = tree.xpath(path)
    return nodes[0] if nodes else None


def spider_detail_page(detail_id):
    """Scrape one ershoufang detail page into a HouseInfoModel and persist it.

    Parameters:
        detail_id: the lianjia house number, e.g. '101102345678'.

    Side effects: saves the record via DAO.add_one and, on success, downloads
    up to 9 gallery pictures, uploading each to OSS as a HousePictureModel row.
    """
    link = 'https://bj.lianjia.com/ershoufang/%s.html' % detail_id
    print(link)
    html = spider_page(link)
    tree = etree.HTML(html)

    info = HouseInfoModel()
    info.HouseNumber = detail_id
    info.DownpaymentUrl = 'https://m.lianjia.com/bj/ershoufang/cost/' + detail_id + '.html?lianjiafrom=message'
    info.Downpayment = get_downpayment(info.DownpaymentUrl)

    title = _xpath_first(tree, './/h1/text()')
    if title is not None:
        info.Title = title
    price = _xpath_first(tree, './/div[@class="price "]/span[@class="total"]/text()')
    if price is not None:
        info.Price = price
    unit_price = _xpath_first(tree, './/div[@class="unitPrice"]/span[@class="unitPriceValue"]/text()')
    if unit_price is not None:
        info.UnitPrice = unit_price

    tag_nodes = tree.xpath('.//div[@class="tags clear"]/div[2]/a')
    if tag_nodes:
        tags_arr = []
        for tag_node in tag_nodes:
            tag_value = tag_node.xpath('.//text()')
            if tag_value:
                tags_arr.append(tag_value[0])
        info.Tag = ','.join(tags_arr)

    # The "base" info list is positional: li[1]=layout type, li[2]=floor,
    # li[3]=area, li[6]=building type, li[7]=orientation, li[9]=decoration,
    # li[12]=elevator — presumably stable on lianjia's template; verify if
    # fields start coming back empty.
    base_li = './/div[@class="introContent"]/div[@class="base"]/div[@class="content"]/ul/li[%d]/text()'
    for attr, index in (('Type', 1), ('Floor', 2), ('Area', 3), ('Toward', 7),
                        ('BuildingType', 6), ('Elevator', 12), ('Decoration', 9)):
        value = _xpath_first(tree, base_li % index)
        if value is not None:
            setattr(info, attr, value)

    sub_info = _xpath_first(tree, './/div[@class="area"]/div[@class="subInfo"]/text()')
    if sub_info is not None:
        # re.S (== the original magic flags=16) so '.' spans newlines.
        age = re.search(r'\d+', sub_info, flags=re.S)
        if age:
            info.HouseAge = age.group(0)

    owner_type = _xpath_first(tree, './/div[@class="introContent"]/div[@class="transaction"]/div[@class="content"]/ul/li[2]/span[2]/text()')
    if owner_type is not None:
        info.HouseOwnerType = owner_type
    list_time = _xpath_first(tree, './/div[@class="introContent"]/div[@class="transaction"]/div[@class="content"]/ul/li[1]/span[2]/text()')
    if list_time is not None:
        info.ListTime = list_time

    desc_nodes = tree.xpath('.//div[@class="introContent showbasemore"]')
    if desc_nodes:
        # Strip tag cloud / "view more" / disclaimer fragments before serializing.
        removable = tree.xpath('.//div[@class="tags clear"]|.//div[@class="introContent showbasemore"]/div[@class="viewmore"]|.//div[@class="introContent showbasemore"]/div[@class="disclaimer"]')
        for remov_node in removable:
            desc_nodes[0].remove(remov_node)
        info.Desc = etree.tostring(desc_nodes[0], encoding='utf-8').decode('utf-8')

    house_estate_nodes = tree.xpath('.//div[@class="communityName"]/a[@class="info "]')
    if house_estate_nodes:
        info.Estate = house_estate_nodes[0].xpath('.//text()')[0]
        # href looks like /xiaoqu/<code>/ — second-to-last segment is the code.
        estate_code = house_estate_nodes[0].xpath('.//@href')[0].split('/')[-2]
        info.EstateUrl = 'https://m.lianjia.com/bj/xiaoqu/' + estate_code

    agent_nodes = tree.xpath('.//a[@class="name LOGCLICK"]')
    if agent_nodes:
        info.AgentName = agent_nodes[0].xpath('.//text()')[0]
    agent_pic = _xpath_first(tree, './/a[@class="fl LOGVIEW LOGCLICK"]/img/@src')
    if agent_pic is not None:
        info.AgentPicture = down_picture(agent_pic)

    if DAO.add_one(info):
        parent_id = info.ID
        view_nodes = tree.xpath('.//ul[@class="smallpic"]/li/@data-src')
        # Bug fix: the original if len(view_nodes) > 9 / else branches were
        # byte-identical (both sliced [0:9]); a single capped loop suffices.
        pic_arr = []
        for view_node in view_nodes[0:9]:
            oss_link = down_picture(view_node)
            if oss_link:
                p = HousePictureModel()
                p.HouseID = parent_id
                p.HousePicture = oss_link
                pic_arr.append(p)
        if pic_arr:
            DAO.add_all(pic_arr)
    print('success: %s' % info.Title)


def down_picture(link):
    """Download an image from *link*, cache it on disk, and upload it to OSS.

    Parameters:
        link: absolute image URL (served by image1.ljcdn.com).

    Returns:
        The OSS URL from upload_to_oss on success, or None when the
        download does not answer 200.

    The local filename is deterministic (uuid3 of the URL), so an image
    already cached on disk is not written again.
    """
    picture_id = str(uuid.uuid3(uuid.NAMESPACE_DNS, link))
    file_path = 'D:/lianjia/' + picture_id + '.jpg'
    head = {
        'Host': 'image1.ljcdn.com',
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:61.0) Gecko/20100101 Firefox/61.0',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
        'Accept-Encoding': 'gzip, deflate, br',
        'Connection': 'keep-alive',
        'Upgrade-Insecure-Requests': '1',
        'Pragma': 'no-cache',
        'Cache-Control': 'no-cache',
    }
    resp = requests.get(link, headers=head)
    if not (resp and resp.status_code == 200):
        return None
    # Fix: the hard-coded cache directory may not exist on a fresh machine;
    # open(..., 'wb') would raise FileNotFoundError without this.
    os.makedirs(os.path.dirname(file_path), exist_ok=True)
    if not os.path.exists(file_path):
        with open(file_path, 'wb') as f:
            f.write(resp.content)
    return upload_to_oss(file_path)


def get_m_header():
    """Return the fixed headers used for the mobile site m.lianjia.com."""
    return {
        'Host': 'm.lianjia.com',
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:61.0) Gecko/20100101 Firefox/61.0',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
        'Accept-Encoding': 'gzip, deflate, br',
        'Connection': 'keep-alive',
        'Upgrade-Insecure-Requests': '1',
        'Pragma': 'no-cache',
        'Cache-Control': 'no-cache',
    }


def spider_m_page(link):
    """GET a mobile-site page via the shared session; decoded HTML or None."""
    resp = session.get(link, headers=get_m_header())
    ok = resp and resp.status_code == 200
    return resp.content.decode('utf-8') if ok else None


def get_downpayment(link):
    """Fetch the m.lianjia.com cost page and return the first-payment text.

    Parameters:
        link: the mobile cost-calculator URL for one listing.

    Returns:
        The raw text of the element marked data-mark="first_pay",
        or 0 when the page could not be fetched or the element is absent.
    """
    html = spider_m_page(link)
    if html is None:
        # Fix: spider_m_page returns None on non-200 responses and
        # etree.HTML(None) raises ValueError — bail out early instead.
        return 0
    tree = etree.HTML(html)
    pay_node = tree.xpath('.//*[@data-mark="first_pay"]/text()')
    if pay_node:
        return pay_node[0]
    return 0


# Detail-page ids already crawled in this run; used by handler_list_page to
# avoid scraping the same listing twice across overlapping list pages.
house_ids = []


def handler_list_page(link, html):
    """Crawl every not-yet-seen detail id found in a list page's *html*.

    *link* is currently unused but kept for the caller's signature.
    Each newly crawled id is appended to the module-level house_ids
    de-duplication list.
    """
    ids = get_house_links(html)
    if not ids:
        return
    for detail_id in ids:
        if detail_id in house_ids:
            continue
        spider_detail_page(detail_id)
        house_ids.append(detail_id)


# def clear_mysql():
#     db = DBSession()
#     try:
#         db.execute('delete from data_lianjia_house_info')
#         db.execute('delete from data_lianjia_house_pic')
#     except Exception as e:
#         print(e)
#     finally:
#         db.close()


def main():
    """Entry point: fetch the first list page for each search keyword.

    NOTE(review): most of the pipeline (pagination, detail crawling, the
    mongo export) is currently commented out — only the first page's HTML
    is printed. Uncomment the handler_list_page block to resume crawling.
    """
    # clear_mysql()
    # Search keywords (Beijing estate names) to query on lianjia.
    arr = ['天通苑', '国奥村']
    for key  in arr:
        q = parse.quote(key)
        link = 'https://bj.lianjia.com/ershoufang/rs' + q + '/'
        html = spider_page(link)
        print(html)
    #     handler_list_page(link, html)
    #     url_temp = 'https://bj.lianjia.com/ershoufang/pg{page}rs' + q + '/'
    #     mac = re.search('\"totalPage\":(\d+)', html)
    #     if mac:
    #         count = int(mac.group(1))
    #         for x in range(2, count + 1):
    #             link = url_temp.format(page=x)
    #             handler_list_page(link, spider_page(link))
    # export_lianjia_to_mongo()

   
# Script entry point: run the crawler when executed directly.
if __name__ == '__main__':
    main()
