import pymysql
import re
import requests
from multiprocessing.dummy import Pool as ThreadPool
import datetime


class XLY(object):
    """Scraper for lvmama.com ("驴妈妈旅游") product pages.

    Workflow: read candidate links from the ``gly`` MySQL table
    (``get_data``), scrape each product page with regexes
    (``parse_data`` / ``save_pic``), and write the results back to the
    ``lvmama`` / ``lvmama_pic`` tables.
    """

    def __init__(self):
        # MySQL connection settings.
        # NOTE(review): credentials are hard-coded; consider moving them
        # to a config file or environment variables.
        self.host = '127.0.0.1'
        self.db = 'app_mark'
        self.user = 'root'
        self.passwd = '123456'
        self.charset = 'utf8mb4'
        # Desktop Chrome UA so the site serves its normal PC pages.
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.121 Safari/537.36'
        }
        self.start = datetime.datetime.now()

    def _connect(self):
        """Open a new MySQL connection with the configured settings."""
        return pymysql.connect(host=self.host, db=self.db, user=self.user,
                               passwd=self.passwd, charset=self.charset)

    @staticmethod
    def _first_match(html, patterns):
        """Return the first capture of the first pattern matching *html*.

        Patterns are tried in order with ``re.S`` (DOTALL); returns None
        when none of them matches — this replaces the original deeply
        nested findall/else ladders.
        """
        for pattern in patterns:
            found = re.findall(pattern, html, re.S)
            if found:
                return found[0]
        return None

    @staticmethod
    def _item_id(url):
        """Product id: the last path segment with any query string stripped."""
        return re.sub(r'\?.*', '', url.split('/')[-1])

    def get_data(self):
        """Fetch the links to scrape from the ``gly`` table.

        Returns pymysql's tuple of 1-tuples ``((link,), ...)``, or None
        when the query fails.
        """
        con = self._connect()
        cur = con.cursor()
        sql = 'select link from gly where tag = "1" and sitename="驴妈妈旅游"'
        # NOTE(review): this selects tag="1" rows but then flips tag="0"
        # rows to "1" — verify the select was not meant to target the
        # unprocessed (tag="0") rows instead.
        after_sql = 'update gly set tag="1" where tag="0" and sitename = "驴妈妈旅游"'
        try:
            cur.execute(sql)
            results = cur.fetchall()
            cur.execute(after_sql)
        except Exception as e:
            con.rollback()
            results = None
            print('error~', e)
        else:
            con.commit()
        finally:
            # Close even on failure (original leaked nothing only because
            # the close calls happened to sit after the try block).
            cur.close()
            con.close()
        return results

    def _extract_title(self, html):
        """Title text with markup/noise removed, or None when not found.

        Fix: the original left ``title`` as an empty list when both
        patterns missed, which then reached the INSERT.
        """
        title = self._first_match(html, [
            r'<h.*?tit">(.*?)</h.*?>',
            r'<p class="nchtitle">(.*?)</p>',
        ])
        if title:
            title = re.sub(r'\n|\r|&nbsp;|自营|<[\s\S]*?>', '', title).strip()
        return title or None

    def _extract_price(self, html):
        """Price digits as a string, or None — same fallback order as before."""
        return self._first_match(html, [
            r'<dfn.*?>(\d+)</dfn>',
            r'<span class="product_price">.*?(\d+).*?</span>',
            r'￥<em>(\d+)</em>',
            r'<span class="product-price-value">.*?(\d+).*?</span>',
        ])

    def _extract_praise(self, html):
        """Praise rate as a float in [0, 100], or None when absent/invalid."""
        praise = self._first_match(html, [
            r'<p class="product_top_dp">[\s\S]*?<span>([\s\S]*?)</span>[\s\S]*?</p>',
            r'<a href="#pro_comment".*?<span>([\s\S]*?)</span>',
            r'<span class="c_f60">([\s\S]*?)</span>',
            r'<p class="product_top_dp">[\s\S]*?<span>([\s\S]*?)<small>%</small>[\s\S]*?</span>',
            r'<span class="val">([\s\S]*?)</span>',
        ])
        if praise is None:
            return None
        praise = re.sub(r'<.*?>', '', praise).strip().replace('%', '')
        try:
            value = float(praise)
        except ValueError:
            # Fix: the original crashed here on non-numeric captures.
            print('好评率抓取错误')
            return None
        if value > 100:
            print('好评率抓取错误')
            return None
        return value

    @staticmethod
    def _extract_route(html):
        """Return (starting_city, target_city, days_spent), each possibly None."""
        starting = re.findall(
            r'<dl class="info-city">[\s\S]*?出发城市[\s\S]*?<ii>([\s\S]*?)</ii></dd>',
            html, re.S)
        if not starting:
            return None, None, None
        tag_re = re.compile(r'<.*?>')
        starting_city = tag_re.sub('', starting[0])
        target = re.findall(r'<dt>目的地[\s\S]*?<dd>([\s\S]*?)</dd>', html, re.S)
        target_city = tag_re.sub('', target[0]) if target else None
        # Fix: the original indexed [0] unconditionally → IndexError when
        # a page has no 出游天数 row.
        days = re.findall(r'<dt>出游天数[\s\S]*?<dd>([\s\S]*?)</dd>', html, re.S)
        days_spent = tag_re.sub('', days[0]) if days else None
        return starting_city, target_city, days_spent

    def _extract_type(self, html):
        """Product type label, or None — same fallback order as before."""
        return self._first_match(html, [
            r'<i class="t-category">([\s\S]*?)</i>',
            r'<span class="product_top_type product_type_zyx">([\s\S]*?)</span>',
            r'<span class="dpn_group">([\s\S]*?)</span>',
        ])

    def parse_data(self, url):
        """Scrape one product page and insert the extracted row.

        ``url`` is a 1-tuple row ``(link,)`` as produced by get_data().
        Hotel and scenic-spot pages are skipped (checked before fetching,
        so filtered links no longer cost an HTTP request).
        """
        print(url)
        url = url[0]
        if 'scenic' in url or 'hotel' in url:
            return
        item_id = self._item_id(url)
        # Timeout so a stuck server cannot hang a worker thread forever.
        response = requests.get(url, headers=self.headers, timeout=15)
        html = response.text
        title = self._extract_title(html)
        price = self._extract_price(html)
        praise = self._extract_praise(html)
        starting_city, target_city, days_spent = self._extract_route(html)
        type_ = self._extract_type(html)
        self.save_data([item_id, title, price, praise, starting_city,
                        target_city, days_spent, type_, url])

    def save_pic(self, url):
        """Scrape image URLs from one product page and store each of them.

        ``url`` is a 1-tuple row ``(link,)``; hotel/scenic pages are skipped.
        """
        url = url[0]
        if 'scenic' in url or 'hotel' in url:
            return
        item_id = self._item_id(url)
        response = requests.get(url, headers=self.headers, timeout=15)
        html = response.text
        pic_urls = re.findall(r'<img src="(.*?/pc/.*?\.jpg)"', html)
        for pic_url in pic_urls:
            if 'to' in pic_url:
                # Lazy-loaded images carry the real URL in a to="..." attr.
                # BUG FIX: re.findall returns a list; the original rebound
                # pic_url to that list, so `'http' in pic_url` then tested
                # list membership and these rows were silently dropped.
                redirected = re.findall(r'to="(.*?\.jpg)', pic_url)
                if not redirected:
                    continue
                pic_url = redirected[0]
            if 'http' in pic_url:
                self.save_pic_url([item_id, pic_url, url])

    def _insert(self, sql, list_data, ok_msg):
        """Run one parameterized INSERT; commit on success, rollback on error.

        Shared by save_pic_url/save_data, which were byte-for-byte duplicates.
        """
        con = self._connect()
        cur = con.cursor()
        try:
            cur.execute(sql, list_data)
            print(ok_msg)
        except Exception as e:
            con.rollback()
            print('error~', e)
        else:
            con.commit()
        finally:
            cur.close()
            con.close()

    def save_pic_url(self, list_data):
        """Insert one (id_num, pic_url, url) row into ``lvmama_pic``."""
        self._insert(
            'insert into lvmama_pic(id_num, pic_url, url) values (%s, %s, %s)',
            list_data, 'success')

    def save_data(self, list_data):
        """Insert one scraped product row into ``lvmama``."""
        self._insert(
            'insert into lvmama(id_num, title, price, praise, starting_city, target_city, days_spent, type_, link) values (%s, %s, %s, %s, %s, %s, %s, %s, %s)',
            list_data, 'insert success')


if __name__ == '__main__':
    # Pull the pending links from MySQL, then fetch their pictures
    # concurrently (multiprocessing.dummy.Pool = thread-backed pool,
    # appropriate for this I/O-bound work).
    scraper = XLY()
    links = scraper.get_data()
    if links:
        workers = ThreadPool(20)
        workers.map(scraper.save_pic, links)
        workers.close()
        workers.join()
    finished = datetime.datetime.now()
    print('耗时:', (finished - scraper.start))