import os
import sys
import yaml
import json
import pymysql
import logging
import requests
import pytesseract
from PIL import Image
from io import BytesIO
from urllib import parse
from bs4 import BeautifulSoup
from logging.handlers import RotatingFileHandler


def array_filter(data, key=''):
    """Deduplicate a list of dicts by the value of ``key``.

    Keeps the first occurrence of each distinct ``row[key]`` value and
    preserves input order.

    Fixes the original, which raised IndexError on an empty ``data``
    (it unconditionally read ``data[0]``) and used a fragile counter loop
    that appended to the list while iterating it.

    :param data: list of dicts to deduplicate
    :param key: dict key whose value defines uniqueness
    :return: new list with duplicates (by ``key``) removed
    """
    result = []
    seen = set()
    for row in data:
        value = row[key]
        if value not in seen:
            seen.add(value)
            result.append(row)
    return result


class connect_mysql(object):
    """Thin pymysql wrapper: one connection + one DictCursor and execute helpers."""

    def __init__(self, confif):
        # ``confif`` (sic) keeps the original parameter name so existing callers
        # using the keyword still work. Expected keys: host, user, password,
        # database, port.
        self.config = confif
        self.conn = pymysql.connect(
            host=confif['host'],
            user=confif['user'],
            password=confif['password'],
            database=confif['database'],
            port=confif['port'],
            charset='utf8',
            cursorclass=pymysql.cursors.DictCursor
        )
        self.cursor = self.conn.cursor()

    def execute_sql(self, sql):
        """Execute a single SQL statement and commit."""
        self.cursor.execute(sql)
        self.conn.commit()

    def execute_many_sql(self, sql, data):
        """Execute a parameterized statement for every row in ``data``, then commit."""
        self.cursor.executemany(sql, data)
        self.conn.commit()

    def get_data(self):
        """Return all pending result rows as a list of {column: value} dicts.

        Fixes two defects in the original implementation:
        - ``result.append()`` was called with no argument (TypeError);
        - rows were indexed by position even though the connection uses
          DictCursor, whose rows are already dicts (KeyError).
        Tuple rows (e.g. if the cursor class is changed) are still supported
        via the column-position map from :meth:`get_fileds`.
        """
        result = []
        data = self.cursor.fetchall()
        if data:
            if isinstance(data[0], dict):
                # DictCursor rows are already {column: value}.
                result = [dict(row) for row in data]
            else:
                # Tuple rows: map column names to positions via the description.
                keys = self.get_fileds()
                for row in data:
                    result.append({key: row[idx] for key, idx in keys.items()})
        return result

    def get_fileds(self):
        """Return {column-name: position} for the current result set.

        Method name keeps the historic typo ('fileds') for caller compatibility.
        """
        return {desc[0]: i for i, desc in enumerate(self.cursor.description)}

    def close_mysql(self):
        """Close the cursor, then the connection."""
        self.cursor.close()
        self.conn.close()


class Main(object):
    """Scrape ziroom.com rental listings for one subway station into MySQL.

    Workflow: load (or build) the cached filter-menu links, resolve the link
    for ``area`` -> ``line`` -> ``station``, then walk the paginated listing
    pages, OCR-decoding the image-sprite prices and bulk-inserting each page.
    """

    def __init__(self, url, area, line, station):
        # Browser-like headers: ziroom rejects requests without a desktop
        # user-agent and matching Origin/Referer.
        self.headers = {
            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.159 Safari/537.36",
            'Origin': 'https://www.ziroom.com',
            'Referer': 'https://www.ziroom.com/'
        }
        self.url = url
        self.html_file = '../../data/ziru.html'      # cached landing-page HTML
        self.data_cache = '../data/ziru_menu.cache'  # JSON cache of filter menu links
        self.area = area        # filter group name, e.g. '地铁'
        self.line = line        # option inside the group, e.g. '6号线'
        self.station = station  # grand-child option (station), e.g. '常营'

    def send_request(self, url):
        """GET ``url`` with the spoofed headers.

        :return: the ``requests.Response``, or ``False`` on any network error
                 (callers must check before dereferencing).
        """
        try:
            return requests.get(url, headers=self.headers, timeout=10)
        except Exception as e:
            logging.error('网络请求出错 %s' % e)
            return False

    def update_chache(self, data):
        """Persist the menu-link dict to the JSON cache file.

        Method name keeps the historic typo ('chache') for compatibility.
        """
        with open(self.data_cache, 'w') as f:
            f.write(json.dumps(data))

    def get_init_data(self):
        """Fetch the listing landing page and cache every filter option's link.

        Builds ``{filter-name: {option-name: {'link': url}}}`` from the
        区域/地铁/通勤 filter blocks and writes it to ``self.data_cache``.
        Uses the on-disk HTML copy when present to avoid re-requesting.
        """
        if not os.path.exists(self.html_file):
            resp = self.send_request(self.url)
            if not resp:
                # send_request already logged the error; the original crashed
                # here with AttributeError on resp.content.
                sys.exit(1)
            html_text = resp.content.decode('utf-8')
            with open(self.html_file, 'w', encoding='utf-8') as f:
                f.write(html_text)
            html = BeautifulSoup(html_text, 'lxml')
        else:
            with open(self.html_file, 'r', encoding='utf-8') as f:
                html = BeautifulSoup(f.read(), 'lxml')

        opt_type = {}
        for type_node in html.find_all('div', class_='opt-type'):
            opt_name = type_node.find('span', class_='opt-name').text
            opt_type[opt_name] = {}
            child_wrapper = type_node.find('div', class_='child-opt')
            wrapper = child_wrapper.find('div', class_='wrapper')
            if wrapper:
                for item in wrapper.find_all('a', class_='item'):
                    opt_type[opt_name][item.text] = {
                        'link': parse.urljoin(self.url, item['href'])
                    }
        # Reuse the cache writer instead of duplicating the file handling.
        self.update_chache(opt_type)

    def get_menu_link(self, html):
        """Collect the station-name links shown once a subway line is selected.

        Stores them under ``self.opt_type[area][line]['child']`` and refreshes
        the on-disk cache so later runs skip this request.
        """
        grand_child = html.find('div', class_='grand-child-opt')
        if grand_child:
            self.opt_type[self.area][self.line]['child'] = {}
            for checkbox in grand_child.find_all('a', class_='checkbox'):
                item_link = parse.urljoin(self.url, checkbox['href'])
                self.opt_type[self.area][self.line]['child'][checkbox.text] = item_link
            self.update_chache(self.opt_type)

    def parse_html(self, html):
        """Parse listing pages, saving each page's rows, following pagination.

        Iterative (the original recursed once per page and could exhaust the
        Python recursion limit on long result sets).
        """
        while True:
            Z_pages = html.find('div', class_='Z_pages')
            self.now_page = Z_pages.find('a', class_='active').text
            logging.info('正在下载第[%s]页...' % str(self.now_page))
            next_page = Z_pages.find('a', class_='next')

            box_node = html.find('div', class_='Z_list-box')
            data_lst = []
            for node in box_node.find_all('div', class_='item'):
                record = self._parse_item(node)
                if record is not None:
                    data_lst.append(record)
            self.save_data(data_lst)

            if not next_page:
                break
            self.url = parse.urljoin(self.url, next_page['href'])
            resp = self.send_request(self.url)
            if not resp:
                # Network failure mid-pagination: stop cleanly rather than
                # crash on resp.content (original behavior).
                break
            html = BeautifulSoup(resp.content, 'lxml')

    def _parse_item(self, node):
        """Parse one listing card into a row tuple, or return None to skip it."""
        # Picture layer: skip placeholder/ad cards without a real detail link.
        pic = node.find('div', class_='pic-box')
        if pic is None:
            return None
        link = pic.find('a')['href']
        if link == 'javascript:;':
            return None
        link = parse.urljoin(self.url, link)
        img_link = parse.urljoin(self.url, node.find('img', class_='lazy')['src'])

        # Description layer: title looks like '整租·小区名-朝向'.
        info = node.find('div', class_='info-box')
        title_lst = info.find('h5').text.split('·')
        zf_type = title_lst[0]
        title_lst = title_lst[-1].split('-')
        title = title_lst[0]
        ws_status = title_lst[-1]

        desc = info.find('div', class_='desc').find_all('div')
        area_lst = desc[0].text.split(' | ')
        area = area_lst[0]
        floor = area_lst[-1]
        location = desc[-1].text.strip()

        price = self._parse_price(info)
        if price is None:
            # No decodable price digits: skip the card instead of crashing
            # on int('') as the original did.
            return None

        tag = info.find('div', class_='tag')
        tag = tag.text.replace("\n", ",").strip(',') if tag else ''
        return (zf_type, title, ws_status, area, floor, location, price, tag, link, img_link)

    def _parse_price(self, info):
        """Decode the image-sprite price digits via OCR.

        Each digit span carries a background-image sprite URL plus an x-offset;
        the sprite holds the shuffled digits and the offset (20px per digit)
        indexes into the OCR result.
        e.g. https://static8.ziroom.com/phoenix/pc/images/price/new-list/9aef59e0b28bf1225780d84f37520891.png

        :return: the price as an int, or None when no digits were found.
        """
        price = ''
        price_lst = []
        price_node = info.find('div', class_='price')
        for item in price_node.select('span[class="num"]'):
            part = item['style']
            url = parse.urljoin(
                self.url,
                part.split(');background-position: ')[0].split('background-image: url(')[1])
            tip = part.split(');background-position: ')[1].split('px')[0]
            # OCR the sprite once per card; every digit span reuses the result.
            if not price_lst:
                price_lst = self.get_price(url)
            price += price_lst[int(abs(float(tip)) // 20)]
        if not price:
            return None
        return int(price)

    def save_data(self, data):
        """Bulk-insert one page of rows into the bj_ziroom_zf table."""
        insert_sql = "insert into bj_ziroom_zf (zf_type, title, ws_status, area, floor, location, price, tag, link, img_link) values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
        self.db.execute_many_sql(insert_sql, data)
        logging.info('第[%s]页插入完毕' % str(self.now_page))

    @staticmethod
    def get_price(image_url):
        """OCR the price sprite image and return its characters as a list.

        The caller maps a pixel offset to an index into this list.
        NOTE(review): the tessdata path is hard-coded for one Windows install —
        should come from config.
        """
        img_content = requests.get(image_url).content
        image = Image.open(BytesIO(img_content))
        testdata_dir_config = '--tessdata-dir "D:\\Program Files (x86)\\Tesseract-OCR\\tessdata"'
        digital_table = pytesseract.image_to_string(image, config=testdata_dir_config, lang='chi_sim')
        return list(digital_table)

    def run(self):
        """Entry point: load config, set up logging and DB, resolve the
        station link, then scrape every listing page for that station."""
        # Use a context manager so the config handle is always closed
        # (the original left it to the garbage collector on error).
        with open(os.path.dirname(os.path.abspath(__file__)) + '/config.yaml', encoding='utf-8') as config_file:
            self.config = yaml.safe_load(config_file)

        ########### 如有报错 此块注释 #########
        name = 'ziroom_house'
        logging.basicConfig(level=logging.INFO)
        handler = RotatingFileHandler(os.path.dirname(os.path.abspath(__file__)) + '/../logs/%s.log' % name,
                                      maxBytes=134217728, backupCount=7)
        formatter = logging.Formatter('%(asctime)s - %(lineno)d- %(levelname)s - %(message)s')
        handler.setFormatter(formatter)
        logging.getLogger().addHandler(handler)
        requests_log = logging.getLogger("requests.packages.urllib3")
        requests_log.setLevel(logging.ERROR)
        ########### 如有报错 此块注释 #########

        logging.warning('启动 [%s]', name)
        logging.warning('主线程 PID [%s]', os.getpid())
        self.db = connect_mysql(self.config['mysql'])

        if not os.path.exists(self.data_cache):
            self.get_init_data()
        with open(self.data_cache, 'r') as f:
            self.opt_type = json.loads(f.read())

        # Refresh the station ('child') links when the cache lacks this station.
        menu = self.opt_type[self.area][self.line]
        if 'child' not in menu or self.station not in menu['child']:
            try:
                self.url = menu['link']
                resp = self.send_request(self.url)
                html = BeautifulSoup(resp.content, 'lxml')
                self.get_menu_link(html)
            except Exception as e:
                logging.info('找不到站点，%s' % e)
                sys.exit(0)
        # Still missing after the refresh: the configured station name is wrong
        # (the original crashed here with an uncaught KeyError).
        if 'child' not in menu or self.station not in menu['child']:
            logging.info('找不到站点，%s' % self.station)
            sys.exit(0)

        self.url = menu['child'][self.station]
        resp = self.send_request(self.url)
        if not resp:
            sys.exit(1)
        html = BeautifulSoup(resp.content, 'lxml')
        try:
            self.parse_html(html)
        finally:
            # Always release the DB connection, even if parsing fails.
            self.db.close_mysql()


if __name__ == '__main__':
    # Per-city scrape settings: entry URL plus the filter path to one station.
    cities = {
        '上海': dict(url='https://sh.ziroom.com/z/?sort=2&isOpen=0',
                     area='地铁', line='9号线', station='泗泾'),
        '北京': dict(url='https://www.ziroom.com/z/?sort=2&isOpen=0',
                     area='地铁', line='6号线', station='常营'),
    }
    chosen = cities['上海']
    scraper = Main(chosen['url'], chosen['area'], chosen['line'], chosen['station'])
    scraper.run()
