#coding:utf-8
import requests
from  bs4 import BeautifulSoup
import  redis
import random
import time
import sqlite3
import re
import threading

class dp_food_data:
    '''
    Crawl restaurant data for one city from dianping.com.

    Redis sets act as URL work queues (pending vs. already-visited URLs)
    and SQLite ('dianping_js.db', table js_food) stores the scraped shop
    details.  Typical pipeline: get_search() seeds the listing-page queue,
    get_shop() drains it collecting shop-detail URLs, get_shop_info()
    drains those and persists one row per shop.
    '''

    # User-Agent pool; a random entry is used for every shop-detail request
    # to make the crawler look less like a single automated client.
    USER_AGENTS = [
        "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50",
        "Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50",
        "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0;",
        "Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0)",
        "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)",
        "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv,2.0.1) Gecko/20100101 Firefox/4.0.1",
        "Mozilla/5.0 (Windows NT 6.1; rv,2.0.1) Gecko/20100101 Firefox/4.0.1",
        "Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; en) Presto/2.8.131 Version/11.11",
        "Opera/9.80 (Windows NT 6.1; U; en) Presto/2.8.131 Version/11.11",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
        "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Maxthon 2.0)",
        "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; TencentTraveler 4.0)",
        "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; The World)",
        "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SE 2.X MetaSr 1.0; SE 2.X MetaSr 1.0; .NET CLR 2.0.50727; SE 2.X MetaSr 1.0)",
        "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; 360SE)",
        "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Avant Browser)",
        "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)",
        "Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Fedora/1.9.0.8-1.fc10 Kazehakase/0.5.6",
        "Mozilla/5.0 (X11; Linux i686; U;) Gecko/20070322 Kazehakase/0.4.5",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.71 Safari/537.1 LBBROWSER",
        "Opera/9.25 (Windows NT 5.1; U; en)",
        "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11",
        "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E)",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
        "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SV1; QQDownload 732; .NET4.0C; .NET4.0E; 360SE)",
        "Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 3.0.04506.30)",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.20 (KHTML, like Gecko) Chrome/19.0.1036.7 Safari/535.20",
        "Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Fedora/1.9.0.8-1.fc10 Kazehakase/0.5.6",
        "Mozilla/5.0 (X11; U; Linux x86_64; zh-CN; rv:1.9.2.10) Gecko/20100922 Ubuntu/10.10 (maverick) Firefox/3.6.10",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.71 Safari/537.1 LBBROWSER",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.89 Safari/537.1",
        "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; Acoo Browser; SLCC1; .NET CLR 2.0.50727; Media Center PC 5.0; .NET CLR 3.0.04506)",
        "Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.0.12) Gecko/20070731 Ubuntu/dapper-security Firefox/1.5.0.12",
        "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E; LBBROWSER)",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.89 Safari/537.1",
        "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36"
    ]

    def __init__(self):
        # Static request headers; 'User-Agent' and 'Referer' are overwritten
        # per request in get_shop_info().
        self.dp_headers = {
                      'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36',
                      'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
                      'Accept-Language': 'zh-CN,zh;q=0.9',
                      'Accept-Encoding': 'gzip, deflate',
                      'Referer': 'http://www.dianping.com/',
                      'Cookie': 'lxsdk_cuid=15f720347f8c8-07b41f34e13182-3e64430f-1fa400-15f720347f9c8; _lxsdk=15f720347f8c8-07b41f34e13182-3e64430f-1fa400-15f720347f9c8; _hc.v=37cf4d93-ac4e-44ce-65f1-458f214b84eb.1509446339; s_ViewType=10; aburl=1; cye=zhenjiang; cy=428; _lxsdk_s=15fceffeb3a-0cb-83c-4%7C%7C12',
                      'Connection': 'keep-alive',
                      'If-Modified-Since':'Fri, 22 Aug 2014 10:55:22 GMT',
                      'Cache-Control':'max-age=0'
                      }
        # Swap the city slug (e.g. 'zhenjiang', 'changzhou', 'danyang',
        # 'dongtai', ...) to crawl a different city's food listings.
        self.root_url = 'http://www.dianping.com/yixing/food'
        # decode_responses=True makes redis return str instead of bytes.
        pool = redis.ConnectionPool(host='127.0.0.1', port=6379, decode_responses=True)
        self.redis_conn = redis.Redis(connection_pool=pool)
        self.conn = sqlite3.connect('dianping_js.db')
        # Redis set (queue) names:
        self.search_set = 'search_set'        # listing pages still to crawl
        self.old_url = 'old_url'              # listing pages already visited
        self.new_shop_set = 'new_shop_set'    # shop detail pages still to crawl
        self.old_shop_url = 'old_shop_url'    # shop detail pages already stored
        self.ip_proxy = 'ip_proxy'            # reserved for proxy rotation (unused here)

    @staticmethod
    def _flatten_text(tag):
        """Collapse a tag's visible text into one line with every run of
        whitespace replaced by a single leading space (' a b c'); return
        '暂无' when the tag is missing."""
        if not tag:
            return '暂无'
        return ''.join(' ' + piece for piece in tag.get_text().split())

    def get_search(self):
        """Seed the Redis search_set with every listing/filter URL found on
        the city's root food page: ranking lists, the district/cuisine/landmark
        links embedded in a lazily-loaded <script>, and ambience lists."""
        re_url = re.compile(r'href="([\w,/]+)"')
        res = requests.get(self.root_url, headers=self.dp_headers, timeout=5)
        soup = BeautifulSoup(res.text, "html.parser")
        # Ranking ("term-list") links.
        phb_url = soup.find_all('li', class_="term-list-item")
        if phb_url:
            for item in phb_url:
                for a_tag in item.find_all('a'):
                    full_url = 'http://www.dianping.com%s' % a_tag.get('href')
                    self.redis_conn.sadd(self.search_set, full_url)
        # District / cuisine / landmark links live inside a lazily loaded
        # <script class="J_auto-load">, so extract the hrefs with a regex
        # from the stringified tag.
        tags = soup.find('script', class_='J_auto-load')
        if tags:
            script_text = tags.prettify()
            for href in re_url.findall(script_text):
                full_url = 'http://www.dianping.com%s' % href
                self.redis_conn.sadd(self.search_set, full_url)
        # Ambience ("nc_list") links.
        tags_list = soup.find_all('ul', class_='nc_list')
        if tags_list:
            for ul in tags_list:
                for li in ul.find_all('li'):
                    href = li.find('a').get('href')
                    full_url = 'http://www.dianping.com%s' % href
                    # BUG FIX: the relative href used to be queued here
                    # instead of the absolute URL built above, so these
                    # entries could never be fetched.
                    self.redis_conn.sadd(self.search_set, full_url)

    def get_shop(self):
        """Drain search_set, scraping each listing page for shop URLs."""
        while self.redis_conn.scard(self.search_set):
            search_url = self.redis_conn.spop(self.search_set)
            self.get_shop_url(search_url)

    def get_shop_url(self, url):
        """Collect shop-detail URLs from one listing page and follow its
        pagination links.

        Iterates over "next page" links instead of recursing (the original
        recursed once per page, risking RecursionError on long result
        lists).  On any failure the current page URL is pushed back onto
        search_set for a later retry; a random 1-3s pause follows every
        request to throttle the crawler.
        """
        next_url = url
        while next_url:
            url, next_url = next_url, None
            try:
                print('>>>>>>>获取%s页面店铺信息' % url)
                self.redis_conn.sadd(self.old_url, url)
                res = requests.get(url, headers=self.dp_headers, timeout=2)
                # Treat any non-200 response as a failure so the page is retried.
                if res.status_code != 200:
                    raise Exception('网站爬取错误，错误代码为', res.status_code)
                soup = BeautifulSoup(res.text, "html.parser")
                # Each shop entry sits inside a <div class="pic"> wrapper.
                tags = soup.find_all('div', class_='pic')
                if tags:
                    for tag in tags:
                        shop_url = tag.find('a').get('href')
                        # sadd returns 1 only for genuinely new members, so
                        # duplicates are silently skipped.
                        if self.redis_conn.sadd(self.new_shop_set, shop_url):
                            print('添加到new_shop_set >>>', shop_url)
                # Queue the pagination link, if any, for the next iteration.
                next_urls = soup.find('a', class_="next")
                if next_urls:
                    next_url = next_urls.get('href')
                    print('获取下一页信息', next_url)
            except Exception as e:
                self.redis_conn.sadd(self.search_set, url)
                print(e, url)
            finally:
                time.sleep(random.randint(1, 3))

    def get_shop_info(self):
        """Drain new_shop_set: fetch each shop detail page, parse name,
        average price, taste/environment/service scores, address, phone,
        opening hours and summary, and insert one row into the js_food
        SQLite table.  Failed URLs are pushed back onto new_shop_set for a
        later retry; a random 3-5s pause follows every attempt."""
        while self.redis_conn.scard(self.new_shop_set):
            url = self.redis_conn.spop(self.new_shop_set)
            res = None
            try:
                # Rotate the User-Agent and point the Referer at the page
                # itself to look less automated.
                self.dp_headers['User-Agent'] = random.choice(self.USER_AGENTS)
                self.dp_headers['Referer'] = url
                print(self.dp_headers)
                res = requests.get(url, headers=self.dp_headers, timeout=3)
                # Treat any non-200 response as a failure so the URL is retried.
                if res.status_code != 200:
                    raise Exception('网站爬取错误，错误代码为', res.status_code)

                soup = BeautifulSoup(res.text, "html.parser")
                shop_name = soup.find(class_='shop-name')
                if shop_name:
                    # Keep only the first whitespace-separated token: the bare
                    # shop name, without the badges appended after it.
                    shop_name = shop_name.get_text().split()[0]
                else:
                    # BUG FIX: this used to `raise Ellipsis(...)`; Ellipsis is
                    # not an exception class, so that line itself raised a
                    # TypeError and hid the real problem.
                    raise Exception('店铺名称爬去失败', url)

                # Average price per person.
                shop_per_capita = soup.find(id='avgPriceTitle')
                if shop_per_capita:
                    shop_per_capita = shop_per_capita.get_text()
                else:
                    shop_per_capita = '暂无'

                # Taste / environment / service scores share one container.
                services = soup.find(id='comment_score')
                if services:
                    scores = services.get_text().split()  # renamed: no longer shadows builtin `list`
                    shop_taste = scores[0]
                    shop_environment = scores[1]
                    shop_service = scores[2]
                else:
                    shop_taste = shop_environment = shop_service = '暂无'

                shop_address = self._flatten_text(soup.find(class_='expand-info address'))
                shop_tel = self._flatten_text(soup.find(class_='expand-info tel'))

                # Opening hours and (optionally) a short introduction share
                # the 'info info-indent' class; the first block is the hours.
                job_summary = soup.find_all(class_='info info-indent')
                if job_summary:
                    flattened = [self._flatten_text(block) for block in job_summary]
                    shop_job = flattened[0]
                    shop_summary = flattened[1] if len(flattened) > 1 else '暂无'
                else:
                    shop_job = shop_summary = '暂无'

                data = [shop_name, shop_per_capita, shop_taste, shop_environment,
                        shop_service, shop_address, shop_tel, shop_job,
                        shop_summary, url]

                # Mark as done, then persist.
                self.redis_conn.sadd(self.old_shop_url, url)
                cur = self.conn.cursor()
                # BUG FIX: parameterized query instead of %-string formatting,
                # which broke on any quote in the scraped text and was open to
                # SQL injection.
                sql = ("INSERT INTO js_food (shop_name,shop_per_capita,shop_taste,"
                       "shop_environment,shop_service,shop_address,shop_tel,"
                       "shop_job,shop_summary,page_url)"
                       "VALUES(?,?,?,?,?,?,?,?,?,?)")
                print(data)
                cur.execute(sql, data)
                self.conn.commit()
                print('数据保存成功', data[0], data[9])
            except Exception as e:
                # BUG FIX: `res` is None when requests.get itself raised; the
                # original referenced res.status_code unconditionally, turning
                # a network error into a NameError.
                status = res.status_code if res is not None else 'no response'
                print('get_shop_info()Exception >>>', status, e, url)
                self.redis_conn.sadd(self.new_shop_set, url)
            finally:
                # Throttle between detail requests (applies to both the
                # success and the failure path).
                time.sleep(random.randint(3, 5))
        self.conn.close()

if __name__ == '__main__':
    # Run one pipeline stage at a time; uncomment the earlier stages to
    # (re)seed the Redis URL queues before scraping shop details.
    crawler = dp_food_data()
    # crawler.get_search()
    # crawler.get_shop()
    crawler.get_shop_info()
