import config
import asyncio
import aiohttp
from lxml import etree
from retrying import retry
import lianjiaUtil as lju
from lianjiaUtil import MysqlUtil
import requests


class MysqlService(object):
    """
    MySQL access layer for the crawler: table creation, inserts, status
    updates and queries. All execution is delegated to lianjiaUtil.MysqlUtil.
    """

    @staticmethod
    def create_table():
        """Create the city table and the second-hand-room table if absent.

        City table: id, city name, city url, status code
        (-1 crawl failed, 0 not crawled, 1 crawling, 2 crawled).
        Room table: one column per detail-page field (see ParseService).
        """
        city_sql = f"""create table if not exists `{config.CITY_TABLE}` (
                        `id` varchar(64) not null comment "城市id",
                        `city` varchar(50) not null comment "城市名",
                        `city_url` varchar(64) not null comment "城市链接",
                        `status_code` int(1) not null comment "状态码：0未抓取，1正在抓取，2抓取成功，-1抓取失败",
                        primary key (`id`))"""
        #               `crawl_time` datetime not null default now() comment "抓取时间",
        room_sql = f"""create table if not exists `{config.ROOM_TABLE}` (
                        `id` varchar(64) not null comment "二手房id",
                        `title` varchar(64) not null comment "标题",
                        `room_url` varchar(64) not null comment "详情页链接",
                        `place` varchar(64) not null comment "地点",
                        `follow` varchar(64) not null comment "关注数",
                        `updates` varchar(64) not null comment "发布时间",
                        `totalPrice` varchar(64) not null comment "总价格",
                        `unitPrice` varchar(64) not null comment "面积的价格",
                        `visitTime` varchar(64) not null comment "看房时间",
                        `type` varchar(64) not null comment "房屋户型",
                        `floor` varchar(64) not null comment "所在楼层",
                        `floor_area` varchar(64) not null comment "建筑面积",
                        `structure` varchar(64) not null comment "户型结构",
                        `inside_area` varchar(64) not null comment "套内面积",
                        `building_type` varchar(64) not null comment "建筑类型",
                        `orientation` varchar(64) not null comment "房屋朝向",
                        `building_structure` varchar(64) not null comment "建筑结构",
                        `renovation` varchar(64) not null comment "装修情况",
                        `ladder` varchar(64) not null comment "梯户比例",
                        `elevator` varchar(64) not null comment "配备电梯",
                        primary key (`id`))"""
        MysqlUtil.create(city_sql)
        MysqlUtil.create(room_sql)

    @staticmethod
    def insert_data(table, data):
        """Insert one row into `table`.

        :param table: table name.
        :param data: dict mapping column name -> value; key order defines
                     the column order of the generated statement.

        Uses ``insert ignore`` so a duplicate primary key is skipped silently.
        """
        keys = ','.join(data.keys())
        # BUGFIX: the placeholder was written '% s' (with a space). Only the
        # exact '%s' is the portable pyformat parameter marker; the spaced
        # form is rejected by several MySQL drivers.
        placeholders = ','.join(['%s'] * len(data))
        sql = f"insert ignore into {table} ({keys}) values ({placeholders})"
        # BUGFIX: pass a concrete tuple — a dict_values view is not a
        # tuple/list and is not accepted as a parameter sequence by drivers.
        MysqlUtil.insert(sql, tuple(data.values()))

    @staticmethod
    def update_status_code(table, status_code, id):
        """Set the crawl status code for the row whose primary key is `id`.

        :param table: table name.
        :param status_code: int status (-1 failed, 0 new, 1 crawling, 2 done).
        :param id: varchar primary-key value (sha1 hex string).
        """
        # BUGFIX: `id` is a varchar column, so the literal must be quoted;
        # unquoted, MySQL tries to read the hex hash as an identifier and
        # the statement fails. (Also dropped a stray trailing `pass`.)
        sql = f"update {table} set status_code={status_code} where id='{id}'"
        MysqlUtil.updata(sql)

    @staticmethod
    def query_data(table):
        """Return (id, city, city_url, status_code) rows from `table`.

        NOTE(review): the column list is city-table specific, so callers are
        expected to pass config.CITY_TABLE.
        """
        # BUGFIX: the `table` parameter was ignored and config.CITY_TABLE
        # was always queried regardless of the argument.
        sql = f"""select id,city,city_url,status_code from `{table}`"""
        return MysqlUtil.query(sql)


class ParseService(object):
    """
    HTML parsing helpers for the crawler: city index page, listing pages,
    and room detail pages. All methods take raw HTML text and return dicts.
    """

    @staticmethod
    def city_parse(html):
        """Parse the city index page.

        :param html: HTML text of https://www.lianjia.com/city/.
        :return: list of dicts with keys id, city, city_url, status_code.
        """
        tree = etree.HTML(html)
        li_list = tree.xpath('//ul[@class="city_list_ul"]/li/div[2]/div/ul/li')
        item_list = list()
        for li in li_list:
            item = dict()
            city_name = li.xpath('./a/text()')  # city name
            city_url = li.xpath('./a/@href')    # city link
            item['city'] = city_name[0] if city_name else ''
            # Append the listings path so the url points at ershoufang pages.
            item['city_url'] = city_url[0] + "ershoufang/" if city_url else ''
            item['id'] = lju.hash_sha1(item['city'] + item['city_url'])
            item['status_code'] = 0  # 0 = not crawled yet
            item_list.append(item)
        return item_list

    @staticmethod
    def page_parse(html):
        """Parse a listing page into brief per-room info dicts.

        :param html: HTML text of an ershoufang listing page.
        :return: list of dicts (title, room_url, place, follow, updates,
                 totalPrice, unitPrice); missing fields become "".
        """
        tree = etree.HTML(html)
        li_list = tree.xpath('//li[@class="clear LOGVIEWDATA LOGCLICKDATA"]')
        item_list = list()
        for li in li_list:
            item = dict()
            title = li.xpath('./div[1]/div[1]/a/text()')
            room_url = li.xpath('./div[1]/div[1]/a/@href')
            place = li.xpath('./div[1]/div[2]/div[1]/a/text()')
            follow = li.xpath('./div[1]/div[4]/text()')
            totalPrice1 = li.xpath('./div[1]/div[6]/div[1]/span/text()')
            totalPrice2 = li.xpath('./div[1]/div[6]/div[1]/i[2]/text()')
            unitPrice = li.xpath('./div[1]/div[6]/div[2]/span/text()')
            item["title"] = title[0] if title else ""                       # title
            item["room_url"] = room_url[0] if room_url else ""              # detail-page link
            item["place"] = place[0] if place else ""                       # location
            # BUGFIX: the original indexed follow[0] unconditionally, raising
            # IndexError on listings where the follow/updates node is absent.
            follow_parts = follow[0].split('/') if follow else []
            item["follow"] = follow_parts[0] if len(follow_parts) == 2 else ""   # follower count
            item["updates"] = follow_parts[1] if len(follow_parts) == 2 else ""  # publish time
            item["totalPrice"] = totalPrice1[0] + totalPrice2[0] if totalPrice2 and totalPrice1 else ""  # total price
            item["unitPrice"] = unitPrice[0] if unitPrice else ""           # price per area
            item_list.append(item)
        return item_list

    @staticmethod
    def room_parse(data, html):
        """Parse a room detail page and merge the result into `data`.

        :param data: brief-info dict from page_parse (copied, not mutated).
        :param html: HTML text of the room detail page.
        :return: dict with all brief fields plus the detail fields below;
                 any field missing from the page becomes "".
        """
        item = dict()
        item.update(data)
        tree = etree.HTML(html)
        visitTime = tree.xpath('//div[@class="visitTime"]/span[2]/text()')
        item["visitTime"] = visitTime[0] if visitTime else ""               # viewing time
        uls = tree.xpath('//div[@class="base"]/div[2]/ul')
        # BUGFIX: the original called exit() here, killing the whole process
        # on the first detail page with an unexpected layout; degrade to
        # empty detail fields instead so the crawl can continue.
        ul = uls[0] if uls else None

        def first_text(xp):
            """First text node of `xp` relative to the base-info <ul>, or ''."""
            if ul is None:
                return ""
            found = ul.xpath(xp)
            return found[0] if found else ""

        item["type"] = first_text('./li[1]/text()')                         # room layout
        item["floor"] = first_text('./li[2]/text()')                        # floor
        item["floor_area"] = first_text('./li[3]/text()')                   # built area
        item["structure"] = first_text('./li[4]/text()')                    # layout structure
        item["inside_area"] = first_text('./li[5]/text()')                  # inner area
        item["building_type"] = first_text('./li[6]/text()')                # building type
        item["orientation"] = first_text('./li[7]/text()')                  # orientation
        item["building_structure"] = first_text('./li[8]/text()')           # building structure
        item["renovation"] = first_text('./li[9]/text()')                   # renovation state
        item["ladder"] = first_text('./li[10]/text()')                      # ladder/household ratio
        item["elevator"] = first_text('./li[11]/text()')                    # elevator availability
        return item


class CrawlService(object):
    """
    Download layer: fetches the city index (synchronously via requests) and
    listing/detail pages (asynchronously via aiohttp), handing the HTML to
    ParseService. On any non-200 response the process is terminated.
    """
    HEADERS = {"User-Agent": f"{config.USER_AGENT}"}

    # NOTE: a @retry decorator (retrying lib, tuned via config.STOP_MAX_ATTEMPT_NUMBER
    # and config.WAIT_RANDOM_MIN/MAX) was present on each method but is disabled.
    def crawl_citys(self):
        """Fetch the city index page and return the parsed city items."""
        city_index = "https://www.lianjia.com/city/"
        response = requests.get(url=city_index, headers=self.HEADERS)
        if response.status_code != 200:
            print(f"获取所有城市时出错，状态码:{response.status_code}，程序退出。")
            exit()
        response.encoding = "utf-8"
        return ParseService.city_parse(response.text)

    async def crawl_page(self, pag_url):
        """Fetch one listing page and return its brief room-info dicts."""
        async with aiohttp.ClientSession() as session:
            async with session.get(url=pag_url, headers=self.HEADERS) as response:
                if response.status != 200:
                    print(f"获取所有二手房信息时出错，状态码:{response.status}，程序退出。")
                    exit()
                listing_html = await response.text()
        return ParseService.page_parse(listing_html)

    async def crawl_room(self, data):
        """Fetch the detail page named in data["room_url"] and return the full item."""
        async with aiohttp.ClientSession() as session:
            async with session.get(url=data["room_url"], headers=self.HEADERS) as response:
                if response.status != 200:
                    print(f"获取所有二手房信息时出错，状态码:{response.status}，程序退出。")
                    exit()
                detail_html = await response.text()
        return ParseService.room_parse(data, detail_html)


if __name__ == '__main__':
    # This module is a library used by the crawler entry point; there is
    # nothing to run directly. (Removed dead commented-out manual-test
    # scaffolding that previously lived here.)
    pass