# encoding:utf-8
'''
@author   : xaing
@time     : 2018/5/10 19:31
@location : Guangzhou
@purpose  : Use the jiexiba service to resolve the real video URLs of "91"
            pages and store them into the database.
@result   :
'''

import requests
import urllib
import json
import urllib.request
import urllib.parse
import sqliteCRUD as crud
import time

# Base URL of the resolver API
baseUrl = 'http://www.jiexiba.tech/apiapi/getlink'

# Shared HTTP headers for every resolver request.
# NOTE: the previous hard-coded 'Content-Length': '122' was removed — the JSON
# body length varies with each URL being resolved, and requests computes the
# correct Content-Length automatically; sending a wrong fixed value can make
# the server truncate or reject the body.
header = {'Accept' : '*/*',
            'Accept-Encoding' : 'gzip, deflate',
            'Accept-Language' : 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
            'Cache-Control' : 'no-cache',
            'Connection' : 'keep-alive',
            'content-type':'application/json',
            'DNT':'1',
            'Host':'www.jiexiba.tech',
            'origin':'http://www.jiexiba.tech',
            'Pragma':'no-cache',
            'Referer':'http://www.jiexiba.tech/',
            'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:57.0) Gecko/20100101 Firefox/57.0'
        }



def get_json(items):
    '''
    Wrap the first column of each database row in a single-key dict.
    :param items: iterable of sequence-like rows (e.g. sqlite result tuples)
    :return: generator yielding {'result': <first column>} for every row
    '''
    for row in items:
        payload = {'result': row[0]}
        yield payload


def getRequestUrl(requestUrl):
    '''
    POST the target URL to the resolver API and return the resolved real
    video address.

    :param requestUrl: the page URL awaiting resolution
    :return: the real video address reported by the API
    :raises: requests.RequestException on network failure; ValueError/KeyError
             when the response is not the expected JSON (e.g. a plain-text
             "Too Many Requests" reply) — the caller catches and retries.
    '''
    dateStr = {'id': 'porn', 'link': requestUrl}
    # Serialize to JSON ourselves; the shared header already declares
    # content-type: application/json.
    jsonStr = json.dumps(dateStr)
    # timeout added: without it a stalled connection blocks the crawler forever
    req = requests.post(baseUrl, data=jsonStr, headers=header, timeout=30)
    print(req.text)  # server replies "Too Many Requests" in plain text when throttled
    # Parse the JSON reply; raises on a non-JSON (throttled) response
    hjson = json.loads(req.text)
    print("获取到真实地址：" + str(hjson['result']))
    return hjson['result']



def cycleReptile():
    '''
    Resolve every pending row in table91, retrying the whole scan on failure.

    Rows whose realUrl is NULL are fetched; each pending URL (column 5) is
    resolved via getRequestUrl. Successes are written back; unresolvable rows
    are deleted. Any exception pauses 60 seconds and restarts the scan.
    :return: None
    '''
    # Iterative retry loop — the original recursed on every exception, which
    # would eventually exhaust the recursion limit on a flaky network.
    while True:
        try:
            # Fetch the rows still awaiting resolution
            conn = crud.getConnection()
            resu = crud.selectSQL(conn, "select * from table91 where realUrl IS NULL")
            for rowDate in resu:
                # Column 5 holds the URL still to be resolved — TODO confirm
                # against the table91 schema.
                waithandleUrl = rowDate[5]
                if not waithandleUrl:
                    continue
                # POST the URL to the resolver and get the real address
                result = getRequestUrl(waithandleUrl)
                if result:
                    updateSql = '''update table91 set realUrl = ? where href = ?'''
                    dataDB = [(result, waithandleUrl)]
                    conn = crud.getConnection()
                    crud.executeSQL(conn, updateSql, dataDB)
                    time.sleep(20)  # throttle: the resolver rejects rapid requests
                else:
                    # Delete rows the resolver cannot handle
                    print("删除无法解析的数据，待删除的参数=======" + str(waithandleUrl))
                    deleteSql = '''DELETE FROM table91 where href = ?'''
                    # bug fix: (waithandleUrl) was just a parenthesized string;
                    # a one-element tuple needs the trailing comma, wrapped in a
                    # list to match the update branch's parameter shape
                    dataDB = [(waithandleUrl,)]
                    conn = crud.getConnection()
                    crud.executeSQL(conn, deleteSql, dataDB)
            return  # full pass completed without an exception
        except Exception as e:  # narrowed from BaseException so Ctrl-C still works
            print(e)
            time.sleep(60)



if __name__ == '__main__':
    # Start the crawler only when executed as a script, not on import.
    cycleReptile()
