#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2023/05/07 00:40
# @Author  : xwt
# @File    : google_store.py
# @Desc    : 谷歌商店apps、games详情数据爬虫
import requests
from bs4 import BeautifulSoup
from dbutils.pooled_db import PooledDB
import pymysql
# import mysql.connector.pooling
import time
import logging
from logging.handlers import TimedRotatingFileHandler
import threading
import random
import redis
from redis.connection import ConnectionPool
import urllib3
from queue import Queue
import json

# logging.basicConfig(filename='/data/google_shell/google_store.log', level=logging.INFO, format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s')
# Create the module logger.
logger = logging.getLogger('my_logger')
logger.setLevel(logging.INFO)

# Rotate the log at midnight every day; backupCount=5 keeps the five most recent
# archive files and deletes older ones.
handler = TimedRotatingFileHandler('./google_store.log', when='midnight', interval=1, backupCount=5)

# Log record format.
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)

# Attach the handler to the logger.
logger.addHandler(handler)
# Silence urllib3's InsecureRequestWarning spam — all requests below use verify=False,
# and the warnings would drown out useful console output.
urllib3.disable_warnings()

# Redis set names: app/game ids waiting to be processed ...
WAIT_DEAL_GAME_KEY='wait_deal_game_id'
WAIT_DEAL_APP_KEY='wait_deal_app_id'

# ... and ids already processed in the current round.
IDS_SET_GAME_KEY='ids_set_game'
IDS_SET_APP_KEY='ids_set_app'

# Redis connection pool (db 6). NOTE(review): credentials are hard-coded here and for
# MySQL below — consider moving them to configuration/environment variables.
redisPool = ConnectionPool(host='127.0.0.1', port=6379,db = 6,password='Fjs2dsl(*@')
# Shared Redis client backed by the pool.
r = redis.Redis(connection_pool=redisPool)

# queueApp = Queue() # queue for apps results (unused)
# queueGame = Queue() # queue for games results (unused)

# MySQL connection pool (DBUtils PooledDB over PyMySQL).
pool = PooledDB(
    creator=pymysql,  # DB driver used by the pool
    maxconnections=0,    # max connections allowed; 0/None means unlimited
    mincached=10,    # idle connections created at startup; 0 means none
    maxcached=20,    # max idle connections kept in the pool; 0/None means unlimited
    blocking=True,  # block and wait when the pool is exhausted (False would raise)
    maxusage=1,  # NOTE(review): each connection is discarded after a single use — confirm intended
    setsession=[],  # SQL commands executed before each session starts
    ping=0,
    host='127.0.0.1', port=3306,
    user='ddup', password='Fj#4jd*3nM$P', database='ads', charset='utf8mb4'
)

# One shared connection/cursor for the whole process, guarded by the global `lock` below.
conn = pool.connection()
cursor = conn.cursor()

# Force English-language pages on every request.
main_params = {'hl': 'en'}
# Optional proxy for fetching other regions' data, used as
# requests.get(url, proxies=proxies) — currently not wired into any request.
proxies = {
    'http': 'http://8.209.79.168:3128',
    'https': 'https://8.209.79.168:3128'
}

# Browser User-Agent strings; one is picked at random per request via
# random.choice(USER_AGENTS) to look like organic traffic.
USER_AGENTS = [
        'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36 OPR/26.0.1656.60',
        'Opera/8.0 (Windows NT 5.1; U; en)',
        'Mozilla/5.0 (Windows NT 5.1; U; en; rv:1.8.1) Gecko/20061208 Firefox/2.0.0 Opera 9.50',
        'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; en) Opera 9.50',
        'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:34.0) Gecko/20100101 Firefox/34.0',
        'Mozilla/5.0 (X11; U; Linux x86_64; zh-CN; rv:1.9.2.10) Gecko/20100922 Ubuntu/10.10 (maverick) Firefox/3.6.10',
        'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.57.2 (KHTML, like Gecko) Version/5.1.7 Safari/534.57.2',
        'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.71 Safari/537.36',
        'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
        'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.133 Safari/534.16',
        'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.11 TaoBrowser/2.0 Safari/536.11',
        'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.71 Safari/537.1 LBBROWSER',
        'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E; LBBROWSER)',
        'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.84 Safari/535.11 SE 2.X MetaSr 1.0',
        'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SV1; QQDownload 732; .NET4.0C; .NET4.0E; SE 2.X MetaSr 1.0)',
        'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/38.0.2125.122 UBrowser/4.0.3214.0 Safari/537.36'
    ]

# 生成随机UA
def genrateUserAgent():
    """Return a headers dict carrying a User-Agent picked at random from USER_AGENTS."""
    chosen = random.choice(USER_AGENTS)
    return {'User-Agent': chosen}

 # Avoid persistent-connection flakiness: let the requests adapter retry up to 5 times.
requests.adapters.DEFAULT_RETRIES = 5

# Global lock guarding the shared MySQL connection/cursor (and parse state)
# across the three worker threads.
lock = threading.Lock()

# 乐观锁+事务实现原子性操作和数据隔离
def cache_data(set_name, data):
    with r.pipeline() as pipeline:
        while True:
            try:
                # 监视 set_name 集合，以便在事务执行期间检测到其他线程的修改
                pipeline.watch(set_name)

                # 检查 set_name 是否已被其他线程修改
                if r.sismember(set_name, data):
                    pipeline.unwatch()
                    return  # 数据已存在，放弃缓存

                # 开始事务
                pipeline.multi()
                pipeline.sadd(set_name, data)

                # 执行事务
                pipeline.execute()
                break

            except redis.WatchError:
                continue

# apps、games通用解析
def parsing_soup(app_id,detail_soup,appType):
    if app_id == None:
        logger.error("参数错误,{a}----------------->>".format(a=app_id))
        return
    # 解析应用信息并插入数据库
    logger.info("{a}-正在处理的app_id：{c}".format(a=time.strftime("%Y-%m-%d %H:%M:%S",time.localtime()),c=app_id))
    try:
        lock.acquire()
        title = ""
        if detail_soup.find('h1', {'itemprop': 'name'}) != None or detail_soup.find('div', class_='vWM94c') != None:
            try:
                # 程序异常停掉时，很大可能是标题解析出问题，如有非英文的：زد رصيدك
                title = detail_soup.find('h1', {'itemprop': 'name'}).contents[0].string.strip() # 进行替换
            except Exception as e:
                title=detail_soup.find('div', class_='vWM94c')[0].string.strip()
                logger.error("解析详情标题出异常,{a},app_id:{b}-------------------》》：".format(a=e,b=app_id))
        emailAndAddressR = detail_soup.find('div', {'class': 'pSEeg'})
        # print("email------------------->>>",emailAndAddressR)
        email = ""
        if (emailAndAddressR != None):
            # 校验邮箱合法性，详情页面app邮箱地址没有设置占位符，有可能会获取到其他值
            try:
                if len(emailAndAddressR) >= 1 and "@" in detail_soup.find_all('div',class_='pSEeg')[1].string.strip():
                    email = detail_soup.find_all('div',class_='pSEeg')[1].string.strip()
                else:
                    email = detail_soup.find_all('div',class_='pSEeg')[0].string.strip()
            except Exception as e:
                email = detail_soup.find_all('div',class_='pSEeg')[0].string.strip()
                logger.error("verify_email error,{a}".format(a=e))
        address = ""
        if emailAndAddressR != None:
            try:
                for div in detail_soup.find_all('div',class_='pSEeg'):
                    if "@" not in div.string.strip() and "http" not in div.string.strip():
                        address = div.string.strip()
            except Exception as e :
                logger.error("获取address异常{address}".format(address=e))
        reviewsR = detail_soup.find_all('div', {'class': 'g1rdde'})
        reviews = ""
        if (reviewsR != None and len(reviewsR) >= 2):
            try:
                if (detail_soup.find('div', {'class': 'g1rdde'}).string.strip() != 'Downloads'):
                    reviews = detail_soup.find('div', {'class': 'g1rdde'}).string.strip()
            except Exception as e:
                logger.error("get detail reviews exception,{e}---------->>".format(a=e))

        downloadR = detail_soup.find_all('div', {'class': 'ClM7O'})
        download = ""
        if (downloadR != None and len(downloadR) > 1):
            try:
                if len(downloadR) == 2:
                    download = detail_soup.find_all('div', {'class': 'ClM7O'})[0].string.strip()
                elif len(downloadR) == 3:
                    download = detail_soup.find_all('div', {'class': 'ClM7O'})[1].string.strip()
            except Exception as e:
                # 第一轮循环结束，将缓存中的数据写入库
                download = detail_soup.find_all('div', {'class': 'ClM7O'})[0].string.strip()
                logger.error("get detail download exception,{appId},{a}---------->>".format(appId=app_id,a=e))

        icornR = detail_soup.find('img', {'class': 'T75of'})
        icorn = ""
        if (icornR != None):
            try:
                icorn = detail_soup.find('img', {'class': 'T75of'})['src']
            except Exception as e:
                logger.error("获取icorn异常,{a}".format(a=e))
        freeTagR = detail_soup.find('span', {'class': 'UIuSk'})
        freeTag = ""
        if (freeTagR != None):
            try:
                freeTag = detail_soup.find('span', {'class': 'UIuSk'}).string.strip()
            except Exception as e:
                logger.error("获取freeTag异常,{a}".format(a=e))
        classifyR = detail_soup.find('h2', {'class': 'XfZNbf'})
        classify = ""
        if (classifyR != None):
            try:
                if 'About this app' in classifyR:
                    classify = 'apps'
                elif 'About this game' in classifyR:
                    classify = 'games'
            except Exception as e:
                logger.error("获取classify异常,{a}".format(a=e))
        temp = (app_id,title,email,reviews,download,icorn,freeTag,classify,address)
        app_id,title,email,reviews,download,icorn,freeTag,classify,address = '','','','','','','','',''
        return temp
    except Exception as error:
        logger.error("app解析页面内容异常{e}".format(e=error))
    finally:
        lock.release()

# 爬取商店中所有的apps信息
def deal_apps_data():
    # print("+++++++++++++++++++++++++正在爬取apps的信息+++++++++++++++++++++++")
    # 获取页面内容
    url = 'https://play.google.com/store/apps'
    # requests设置移除SSL认证
    try:
        r = requests.get(url,headers=genrateUserAgent(),params=main_params,verify=False)
    except Exception as e:
        logger.error("无法连接到目标服务器，正在尝试重连,{a}".format(a=e))
        time.sleep(3)
        r = requests.get(url,headers=genrateUserAgent(),params=main_params,verify=False)
    # 解析页面
    soup = BeautifulSoup(r.text, 'html.parser')
    if soup == None:
        logger.error("解析页面结果为none")
        return
    return soup

def deal_apps_detail(soup):
    """Walk every app detail link in *soup*, parse and persist unseen apps, and
    queue newly discovered recommendations in Redis for the second pass.
    """
    # BUGFIX: pre-bind so the except-handler log below cannot raise NameError
    # when the failure happens before the first link is seen.
    app_id = ''
    try:
        data = []
        for a in soup.find_all('a', href=True):
            # Only follow app detail-page links.
            if '/store/apps/details?id=' in a['href']:
                app_id = a['href'].split('=')[1]
                time.sleep(1)
                # Fetch the detail page, retrying once on connection failure.
                detail_url = f'https://play.google.com{a["href"]}'
                try:
                    detail_r = requests.get(detail_url,headers=genrateUserAgent(),params=main_params,verify=False)
                except Exception as e:
                    logger.error("无法连接到目标服务器，正在尝试重连,{a}".format(a=e))
                    time.sleep(3)
                    detail_r = requests.get(detail_url,headers=genrateUserAgent(),params=main_params,verify=False)
                detail_soup = BeautifulSoup(detail_r.text, 'html.parser')
                if detail_soup is None:
                    logger.error("解析页面结果为none")
                    continue
                # Already-stored ids skip parse/insert, but the page is still scanned
                # below so new recommendations are not missed.
                if query_to_database(app_id) == 0:
                    temp = parsing_soup(app_id,detail_soup,'apps')
                    data.append(temp)
                    save_to_database(app_id,data,'apps')
                    data = []
                    temp = ''
                # Detail pages embed "Similar apps" / "More by ..." lists; queue any
                # unseen ids in the Redis wait-set for the second pass.
                for b in detail_soup.find_all('a', href=True):
                    if '/store/apps/details?id=' in b['href']:
                        another_app_id = b['href'].split('=')[1]
                        if query_to_database(another_app_id) == 0:
                            cache_data(WAIT_DEAL_APP_KEY,another_app_id)

        # First sweep finished: drain the Redis backlog.
        deal_app_data_by_redis()
    except Exception as error:
        logger.error("apps详情处理出错,{appId},{a}".format(appId=app_id,a=error))

# 爬取商店中所有的games信息
def deal_games_data():
    # 获取页面内容
    url = 'https://play.google.com/store/games'
    # requests设置移除SSL认证
    try:
        r = requests.get(url,headers=genrateUserAgent(),params=main_params,verify=False)
    except Exception as e:
        logger.error("无法连接到目标服务器，正在尝试重连,{a}".format(a=e))
        time.sleep(3)
        r = requests.get(url,headers=genrateUserAgent(),params=main_params,verify=False)
    # 解析页面
    soup = BeautifulSoup(r.text, 'html.parser')
    if soup == None:
        logger.error("解析页面结果为none")
        return
    return soup

# web页面数据处理方法
def deal_games_detail(soup):
    try:
        data = []
        for a in soup.find_all('a', href=True):
            # 判断是否为应用详情页的链接
            if '/store/apps/details?id=' in a['href']:
                app_id = a['href'].split('=')[1]
                time.sleep(1)
                # 请求应用详情页内容
                detail_url = f'https://play.google.com{a["href"]}'
                try:
                    detail_r = requests.get(detail_url,headers=genrateUserAgent(),params=main_params,verify=False)
                except Exception as e:
                    logger.error("无法连接到目标服务器，正在尝试重连,{a}".format(a=e))
                    time.sleep(3)
                    detail_r = requests.get(detail_url,headers=genrateUserAgent(),params=main_params,verify=False)
                # time.sleep(1)
                detail_soup = BeautifulSoup(detail_r.text, 'html.parser')
                if detail_soup == None:
                    logger.error("解析页面结果为none")
                    continue
                # 判断 app_id 是否属于名为 ids_set_game 的Set集合,存在说明处理过了,不能跳过本次循环，只是不再执行解析和insert，但还是要遍历详情中其他apps，避免有新增推荐
                if query_to_database(app_id) == 0:
                    # 解析页面数据
                    temp = parsing_soup(app_id,detail_soup,'games')
                    data.append(temp)
                    save_to_database(app_id,data,'games')
                    data = []
                    temp = ''
                # 如果详情中还包含Similar apps、More by LINE Corporation，递归再次获取详情中的其他详情数据
                for b in detail_soup.find_all('a', href=True):
                    if '/store/apps/details?id=' in b['href']:
                        another_app_id = b['href'].split('=')[1]
                        # 不存在则先写入redis待处理集合中
                        if query_to_database(another_app_id) == 0:
                            cache_data(WAIT_DEAL_GAME_KEY,another_app_id)
        # 第一轮循环结束，将缓存中的数据写入库
        deal_game_data_by_redis()
    except Exception as error:
        logger.error("game详情处理异常{e}".format(e=error))

def deal_search_detail():
    """Crawl Play-Store search results for every keyword queued in Redis.

    Keywords live in the 'key:word:list' Redis list (fed by query_keyword()).
    Each keyword's result page is scanned for app detail links; unseen apps are
    parsed and stored, and further recommendations are queued for the second
    pass. When the list is drained it is rebuilt and the cycle repeats.

    BUGFIX: the endless cycle used to be tail recursion (the function calling
    itself), which grew the call stack until RecursionError; it is now a loop.
    On any unhandled error the function logs and returns, ending its thread,
    exactly as before.
    """
    try:
        redisClient = redis.Redis(connection_pool=redisPool)
        while True:
            data = []
            values = redisClient.lrange('key:word:list', 0, -1)
            for item in values:
                keyword = item.decode('utf-8')
                logger.info('正在通过搜索关键词【{a}】获取数据中。。。。'.format(a=keyword))
                url = 'https://play.google.com/store/search'+ '?q=' + keyword + '&c=apps'
                r = requests.get(url,headers=genrateUserAgent(),params=main_params,verify=False)
                time.sleep(1)
                soup = BeautifulSoup(r.text, 'html.parser')
                if soup is None:
                    logger.error("解析页面结果为none")
                for a in soup.find_all('a', href=True):
                    # Only follow app detail-page links.
                    if '/store/apps/details?id=' in a['href']:
                        app_id = a['href'].split('=')[1]
                        detail_url = f'https://play.google.com{a["href"]}'
                        try:
                            detail_r = requests.get(detail_url,headers=genrateUserAgent(),params=main_params,verify=False)
                        except Exception as e:
                            logger.error("无法连接到目标服务器，正在尝试重连,{a}".format(a=e))
                            time.sleep(3)
                            detail_r = requests.get(detail_url,headers=genrateUserAgent(),params=main_params,verify=False)
                        time.sleep(1)
                        detail_soup = BeautifulSoup(detail_r.text, 'html.parser')
                        if detail_soup is None:
                            logger.error("解析页面结果为none")
                            continue
                        if query_to_database(app_id) == 0:
                            temp = parsing_soup(app_id,detail_soup,'')
                            data.append(temp)
                            save_to_database(app_id,data,'apps')
                            data = []
                            temp = ''
                        # Drop the keyword now so a crash does not restart from scratch.
                        redisClient.lrem('key:word:list', 0, item)
                        # Queue recommended apps found on the detail page.
                        for b in detail_soup.find_all('a', href=True):
                            if '/store/apps/details?id=' in b['href']:
                                another_app_id = b['href'].split('=')[1]
                                if query_to_database(another_app_id) == 0:
                                    cache_data(WAIT_DEAL_APP_KEY,another_app_id)
            # Rebuild the keyword cache so keywords inserted mid-run are picked up.
            time.sleep(1)
            redisClient.delete('key:word:list')
            query_keyword()
    except Exception as error:
        logger.error("search详情处理出错,{a}".format(a=error))

def deal_app_data_by_redis():
    """Drain the Redis backlog of app ids (WAIT_DEAL_APP_KEY), then restart a
    fresh crawl of the apps front page.

    Each backlog id is fetched, parsed, stored, and removed from the wait-set;
    recommendations discovered along the way are queued back in.

    BUGFIX: "run another pass while the backlog is non-empty" is now a loop
    instead of a recursive call — the old tail recursion grew the stack until
    RecursionError. NOTE(review): the hand-off deal_apps_data() ->
    deal_apps_detail() at the bottom still re-enters this function, so the
    overall cycle remains mutually recursive — confirm this is acceptable for
    long unattended runs.
    """
    try:
        while True:
            data = []
            set_members = r.smembers(WAIT_DEAL_APP_KEY)
            members = [member.decode() for member in set_members]
            for appId in members:
                logger.info("deal_app_data_by_redis 方法,appId和待处理set_name-----》》》:{a},{b}".format(a=appId,b=WAIT_DEAL_APP_KEY))
                time.sleep(1)
                # Fetch the detail page, retrying once on connection failure.
                detail_url = f'https://play.google.com/store/apps/details?id={appId}&hl=en'
                try:
                    detail_r = requests.get(detail_url,headers=genrateUserAgent(),params=main_params,verify=False)
                except Exception as e:
                    logger.error("无法连接到目标服务器，正在尝试重连,{a}".format(a=e))
                    time.sleep(3)
                    detail_r = requests.get(detail_url,headers=genrateUserAgent(),params=main_params,verify=False)
                detail_soup = BeautifulSoup(detail_r.text, 'html.parser')
                if detail_soup is None:
                    continue
                # Remove the id from the wait-set now that it is being handled.
                if r.sismember(WAIT_DEAL_APP_KEY,appId):
                    r.srem(WAIT_DEAL_APP_KEY,appId)
                # Parse/insert only unseen ids; the page is still scanned below so
                # new recommendations are not missed.
                if query_to_database(appId) == 0:
                    temp = parsing_soup(appId,detail_soup,'apps')
                    data.append(temp)
                    save_to_database(appId,data,'apps')
                    data = []
                    temp = ''
                # Queue "Similar apps" / "More by ..." ids found on this page.
                for b in detail_soup.find_all('a', href=True):
                    if '/store/apps/details?id=' in b['href']:
                        another_app_id = b['href'].split('=')[1]
                        if query_to_database(another_app_id) == 0:
                            cache_data(WAIT_DEAL_APP_KEY,another_app_id)
            time.sleep(1)
            if r.scard(WAIT_DEAL_APP_KEY) != 0:
                continue  # backlog refilled while we worked: run another pass
            endTime = time.strftime("%Y-%m-%d %H:%M:%S",time.localtime())
            appCnt = r.scard(IDS_SET_APP_KEY)
            appWaitCnt = r.scard(WAIT_DEAL_APP_KEY)
            logger.info("{c}:apps缓存数据本轮处理结束，总处理apps条数和apps剩余待处理条数分别为:{a},{b}".format(a=appCnt,b=appWaitCnt,c=endTime))
            # Backlog empty: reset the processed-ids set and start a fresh crawl.
            if r.exists(IDS_SET_APP_KEY):
                r.delete(IDS_SET_APP_KEY)
            dataSoup = deal_apps_data()
            time.sleep(2)
            deal_apps_detail(dataSoup)
            return
    except Exception as error:
        logger.error("app二次处理详情异常,{e}".format(e=error))

def deal_game_data_by_redis():
    """Drain the Redis backlog of game ids (WAIT_DEAL_GAME_KEY), then restart a
    fresh crawl of the games front page.

    Mirrors deal_app_data_by_redis() for the games side. BUGFIX: the re-run
    while the backlog is non-empty is now a loop instead of tail recursion
    (which previously grew the stack until RecursionError). NOTE(review): the
    hand-off deal_games_data() -> deal_games_detail() still re-enters this
    function, so the overall cycle remains mutually recursive.
    """
    try:
        while True:
            data = []
            set_members = r.smembers(WAIT_DEAL_GAME_KEY)
            members = [member.decode() for member in set_members]
            for appId in members:
                logger.info("deal_game_data_by_redis 方法,appId和待处理set_name-----》》》:{a},{b}".format(a=appId,b=WAIT_DEAL_GAME_KEY))
                time.sleep(1)
                # Fetch the detail page, retrying once on connection failure.
                detail_url = f'https://play.google.com/store/apps/details?id={appId}&hl=en'
                try:
                    detail_r = requests.get(detail_url,headers=genrateUserAgent(),params=main_params,verify=False)
                except Exception as e:
                    logger.error("无法连接到目标服务器，正在尝试重连,{a}".format(a=e))
                    time.sleep(3)
                    detail_r = requests.get(detail_url,headers=genrateUserAgent(),params=main_params,verify=False)
                detail_soup = BeautifulSoup(detail_r.text, 'html.parser')
                if detail_soup is None:
                    continue
                # Remove the id from the wait-set now that it is being handled.
                if r.sismember(WAIT_DEAL_GAME_KEY,appId):
                    r.srem(WAIT_DEAL_GAME_KEY,appId)
                # Parse/insert only unseen ids; the page is still scanned below so
                # new recommendations are not missed.
                if query_to_database(appId) == 0:
                    temp = parsing_soup(appId,detail_soup,'games')
                    data.append(temp)
                    save_to_database(appId,data,'games')
                    data = []
                    temp = ''
                # Queue "Similar apps" / "More by ..." ids found on this page.
                for b in detail_soup.find_all('a', href=True):
                    if '/store/apps/details?id=' in b['href']:
                        another_app_id = b['href'].split('=')[1]
                        if query_to_database(another_app_id) == 0:
                            cache_data(WAIT_DEAL_GAME_KEY,another_app_id)
            time.sleep(1)
            if r.scard(WAIT_DEAL_GAME_KEY) != 0:
                continue  # backlog refilled while we worked: run another pass
            endTime = time.strftime("%Y-%m-%d %H:%M:%S",time.localtime())
            gameCnt = r.scard(IDS_SET_GAME_KEY)
            gameWaitCnt = r.scard(WAIT_DEAL_GAME_KEY)
            logger.info("{c}:games缓存数据本轮处理结束，总处理games条数和games剩余处理条数分别为:{a},{b}".format(a=gameCnt,b=gameWaitCnt,c=endTime))
            # Backlog empty: reset the processed-ids set and start a fresh crawl.
            if r.exists(IDS_SET_GAME_KEY):
                r.delete(IDS_SET_GAME_KEY)
            dataSoup = deal_games_data()
            time.sleep(2)
            deal_games_detail(dataSoup)
            return
    except Exception as error:
        logger.error("game二次解析详情异常,{e}".format(e=error))

def save_to_database(appId,data,classify):
    """REPLACE the rows in *data* into apps_info and record the outcome in Redis.

    On SQL failure the transaction is rolled back and appId is pushed back onto
    the matching wait-set for a retry; on success appId is added to the
    matching processed-ids set.

    Returns:
        The cursor's lastrowid on success, or '' on failure.
    """
    ret = ''
    # Guard the shared connection/cursor against the other worker threads.
    with lock:
        try:
            insert_sql = """REPLACE INTO apps_info (app_id, title, email, reviews, download, icorn,free_tag,classify,address) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s)"""
            cursor.executemany(insert_sql,tuple(data))
            ret = cursor.lastrowid
            conn.commit()
        except Exception as error:
            conn.rollback()
            # Failed insert: requeue the id for the redis-driven second pass.
            if classify == 'apps':
                r.sadd(WAIT_DEAL_APP_KEY,appId)
            else:
                r.sadd(WAIT_DEAL_GAME_KEY,appId)
            logger.info("执行sql异常,{a}".format(a=error))
        if ret != '':
            if classify == 'apps':
                r.sadd(IDS_SET_APP_KEY,appId)
            else:
                # BUGFIX: successfully stored games were added back onto the WAIT
                # set instead of the processed set, so they were re-crawled forever.
                r.sadd(IDS_SET_GAME_KEY,appId)
    return ret

def create_table():
    """Create the apps_info and google_play_keyword tables if they do not exist."""
    # Crawled app details, keyed uniquely by app_id (REPLACE INTO relies on this).
    create_apps_table_sql = """
    CREATE TABLE IF NOT EXISTS apps_info (
    `id` bigint NOT NULL AUTO_INCREMENT COMMENT '主键',
    `app_id` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL COMMENT '包名',
    `title` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL COMMENT '标题',
    `email` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci DEFAULT NULL COMMENT '开发者邮箱',
    `reviews` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci DEFAULT NULL COMMENT '评论数',
    `download` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci DEFAULT NULL COMMENT '下载数',
    `icorn` varchar(2000) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci DEFAULT NULL COMMENT '应用图标',
    `address` varchar(2000) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci DEFAULT NULL COMMENT '开发者地址',
    `free_tag` varchar(200) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci DEFAULT NULL COMMENT '购买标识',
    `classify` varchar(200) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci DEFAULT NULL COMMENT '类型，apps、games',
    `create_time` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '创建时间',
    PRIMARY KEY (`id`),
    UNIQUE KEY `app_id_index` (`app_id`)
    ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_general_ci;
    """
    cursor.execute(create_apps_table_sql)

    # Search keywords fed into deal_search_detail() via Redis.
    # BUGFIX: added IF NOT EXISTS — the bare CREATE TABLE raised MySQL error
    # 1050 ("table already exists") and crashed startup on every run after the
    # first.
    create_keyword_table_sql = """
        CREATE TABLE IF NOT EXISTS `google_play_keyword` (
          `id` int(11) NOT NULL AUTO_INCREMENT COMMENT '主键',
          `keyword` varchar(255) NOT NULL COMMENT '关键字',
          `classify` varchar(255) DEFAULT NULL COMMENT '区分是apps还是games的关键词',
          `sort` int(11) DEFAULT NULL COMMENT '排序',
          `is_available` tinyint(1) NOT NULL DEFAULT '1' COMMENT '是否可用，0-不可用，1-可用，默认1',
          `create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '创建时间',
          PRIMARY KEY (`id`) USING BTREE,
          UNIQUE KEY `keyword_index` (`keyword`)
        ) ENGINE=InnoDB AUTO_INCREMENT=17310 DEFAULT CHARSET=utf8mb4 ROW_FORMAT=DYNAMIC;
        """
    cursor.execute(create_keyword_table_sql)

def query_to_database(appId):
    """Return the number of apps_info rows (0 or 1) whose app_id equals *appId*.

    Returns 0 when the query fails (the error is logged).
    """
    result = 0
    # Guard the shared cursor against concurrent use from the worker threads.
    with lock:
        try:
            query_sql = "select COUNT(*) from apps_info where app_id = %s limit 1;"
            # Pass parameters as a tuple, the documented pymysql calling convention.
            cursor.execute(query_sql,(appId,))
            result = cursor.fetchone()[0]
        except Exception as error:
            logger.info("执行sql异常{a}".format(a=error))
    return result

# 获取数据库录入的关键词并写入到redis中，方便线程处理搜索爬数据使用，减轻对数据库的压力
# 有时间可以做下优化：因为关键词会不定期录入且数据量会越来越多，旧的基本上抓不到什么数据了，为了提高效率，每次启动时实时查询库内总条数与缓存中的关键词总数比对，存在差异则先清除重新写入，保证可以按最新的去抓
# Load DB-managed search keywords into Redis so worker threads read the cache
# instead of hammering MySQL.
def query_keyword():
    """Fill the 'key:word:list' Redis list from google_play_keyword (only when
    the list is currently empty) and record the total under 'totalKey'.
    """
    lock.acquire()
    try:
        if r.llen('key:word:list') == 0:
            query_sql = "select keyword,classify from google_play_keyword where is_available = 1 order by create_time desc;"
            cursor.execute(query_sql)
            for row in cursor.fetchall():
                r.rpush('key:word:list', row[0])
            r.set('totalKey', r.llen('key:word:list'))
    except Exception as error:
        logger.info("执行sql异常{a}".format(a=error))
    finally:
        lock.release()

def query_key_count():
    """Return the number of enabled rows in google_play_keyword (0 on error)."""
    result = 0
    # Take the shared-cursor lock for consistency with the other DB helpers —
    # this was the only query running unprotected.
    with lock:
        try:
            query_sql = "select count(*) from google_play_keyword where is_available = 1;"
            cursor.execute(query_sql)
            result = cursor.fetchone()[0]
        except Exception as error:
            logger.info("执行sql异常{a}".format(a=error))
    return result

if __name__ == '__main__':
    logger.info("开始抓取网页数据,开始时间:{a}............".format(a=time.strftime("%Y-%m-%d %H:%M:%S",time.localtime())))
    create_table()
    # If the cached keyword count no longer matches the DB, clear the cache so
    # the freshest keywords are crawled on this run.
    # BUGFIX: this stanza was mis-indented (a SyntaxError that prevented the
    # whole script from running) and compared an int against the decoded bytes
    # value (always unequal); compare as strings instead.
    query_keyword()
    total = query_key_count()
    if r.exists('totalKey') and str(total) != r.get('totalKey').decode():
        r.set('totalKey', total)
        r.delete('key:word:list')
    query_keyword()
    try:
        # Three workers: apps backlog, games backlog, keyword search.
        t1 = threading.Thread(target=deal_app_data_by_redis)
        t2 = threading.Thread(target=deal_game_data_by_redis)
        t3 = threading.Thread(target=deal_search_detail)
        # Start the workers.
        t1.start()
        t2.start()
        t3.start()

        # Wait for all workers to finish.
        t1.join()
        t2.join()
        t3.join()
    except Exception as e:
        logger.error("线程执行出异常,{a}".format(a=e))
    finally:
        # Final summary of processed vs. still-pending ids.
        endTime = time.strftime("%Y-%m-%d %H:%M:%S",time.localtime())
        appCnt = r.scard(IDS_SET_APP_KEY)
        gameCnt = r.scard(IDS_SET_GAME_KEY)
        appWaitCnt = r.scard(WAIT_DEAL_APP_KEY)
        gameWaitCnt = r.scard(WAIT_DEAL_GAME_KEY)
        logger.info("处理结束:{t}，总处理apps条数{a},games条数{b},apps剩余待处理条数{c},games剩余处理条数{d}".format(t=endTime,a=appCnt,b=gameCnt,c=appWaitCnt,d=gameWaitCnt))
