# Crawler ("long 爬虫")
# Date: 2021/9/8
# Purpose: scrape the Three Gorges travel-booking site (三峡旅游预定网, www.0717nt.com)
# Technique: regular-expression HTML extraction

import pymysql.cursors,time,json,base64
import requests
import re



def model(sql):
    """Execute one SQL statement against the local `lvyou` MySQL database.

    Parameters:
        sql: a complete SQL statement string (already fully interpolated).

    Returns:
        The fetched rows (list of dicts, via DictCursor) when the statement
        produced a result set; otherwise the affected-row count reported by
        ``cursor.execute``; ``None`` when execution failed.
    """
    # A fresh connection per call keeps the helper self-contained — no shared
    # connection state between crawler iterations.
    db = pymysql.connect(host='127.0.0.1',
                         port=3306,
                         user='root',
                         password='root',
                         database='lvyou',
                         charset='utf8mb4',
                         cursorclass=pymysql.cursors.DictCursor)
    try:
        cursor = db.cursor()
        row = cursor.execute(sql)
        print(row)
        db.commit()
        # SELECT-style statements return rows; INSERT/UPDATE return the count.
        data = cursor.fetchall()
        if data:
            return data
        return row
    except Exception as exc:
        # BUG FIX: the original bare `except:` swallowed every error silently
        # (even KeyboardInterrupt). Roll back and report what went wrong.
        db.rollback()
        print('model() failed:', exc)
    finally:
        # Always release the connection, success or failure.
        db.close()

def Prcj(url):
    """Crawl one listing page of www.0717nt.com, then fetch every article it
    links to and store each article in MySQL, also appending each INSERT
    statement to ``lyzj.sql`` as a manual-import fallback.

    Parameters:
        url: listing-page URL, e.g. http://www.0717nt.com/sanxialvyou.php?page=1

    Side effects: HTTP requests, appends to ``lyzj.sql``, inserts rows via
    ``model()``; prints progress to stdout.

    Target table layout (payload fields are JSON-encoded then base64-encoded
    into the `data` column):
        id (auto) | type | title | data(keywords, description, img, wztime,
        wzzuoze, wzlaiyuan, content, name, brtitle) | href | addtime
    """
    resp = requests.get(url)
    resp.encoding = 'utf-8'

    # Pre-compiled patterns.
    # Listing page: link, thumbnail, title and brief title of each card.
    obj = re.compile(r'<div class="x3"><a href="(?P<href>.*?)"><img src="(?P<img>.*?)".*?'
                     r'class="text-big height">(?P<title>.*?)</a>.*?html">(?P<brtitle>.*?)</a></p>', re.S)
    # Article page: meta tags, category, the time/author/source line, and body.
    obj1 = re.compile(r'<meta name=\'keywords\' content="(?P<keywords>.*?)">.*?name=\'description\' content="(?P<description>.*?)">.*?首页</a> '
                      r'</li>.*?\'>(?P<type>.*?)</a> </li>.*?<p class="text-center">时间：(?P<wztime>.*?) '
                      r'作者：(?P<wzzuoze>.*?)来源：(?P<wzlaiyuan>.*?)</p>(?P<content>.*?)</div>', re.S)

    domain = "http://www.0717nt.com/"

    # One tuple per article card: (absolute page URL, absolute image URL,
    # title, brief title). Site hrefs are relative, hence the domain prefix.
    articles = [(domain + it.group("href").strip("/"),
                 domain + it.group("img").strip("/"),
                 it.group("title"),
                 it.group("brtitle"))
                for it in obj.finditer(resp.text)]
    print(len(articles), '条数据')

    with open('lyzj.sql', 'a', encoding='utf-8') as f:
        for idx, (href, img, title, brtitle) in enumerate(articles):
            print(f'第{idx}次')
            print(href)
            child_resp = requests.get(href)
            child_resp.encoding = 'utf-8'
            result1 = obj1.search(child_resp.text)

            # BUG FIX: the original called re.sub(..., re.S), passing re.S
            # (== 16) as the positional `count` argument — capping the image
            # path rewrite at 16 replacements and setting no flag. The
            # pattern contains no '.', so no flag is needed at all.
            content = re.sub(r' src="/jsl/', 'src="' + domain + 'jsl/',
                             result1.group("content"))

            # Renamed from `type` so the builtin is not shadowed.
            article_type = result1.group("type")

            # Payload stored in the table's `data` column; in the listing the
            # article "name" is the same string as its title.
            data = [{'type': article_type,
                     'name': title,
                     'title': title,
                     'brtitle': brtitle,
                     'keywords': result1.group("keywords"),
                     'description': result1.group("description"),
                     'img': img,
                     'wztime': result1.group("wztime"),
                     'wzzuoze': result1.group("wzzuoze"),
                     'wzlaiyuan': result1.group("wzlaiyuan"),
                     'content': content}]

            # BUG FIX: %I is the 12-hour clock hour; minutes are %M.
            addtime = time.strftime('%Y-%m-%d %H:%M:%S')

            # JSON-encode, then base64-encode so quotes/newlines in the HTML
            # payload survive inside the hand-built SQL string below.
            data1 = base64.encodebytes(
                json.dumps(data, ensure_ascii=False).encode()).decode()

            # SECURITY NOTE: string-built SQL is injection-prone. model()
            # only accepts a finished statement, so values remain
            # interpolated here; switch to parameterized queries when
            # model() supports them.
            sql = f'insert into `lyzj` values (null,"{article_type}","{title}","{data1}","{href}","{addtime}")'
            # Keep a file copy of every statement for manual import.
            f.write(sql + "\n")
            res = model(sql)
            print(res)


# print(data.text)
# "http://www.0717nt.com/sanxialvyou.php?page=1"三峡旅游    1页
# BUG FIX: guard the driver loop so importing this module does not start
# crawling as a side effect; behavior when run as a script is unchanged.
if __name__ == '__main__':
    # "三峡旅游" (Three Gorges tourism) listing — a single page.
    for i in range(1, 2):
        url = f'http://www.0717nt.com/sanxialvyou.php?page={i}'
        print(url)
        Prcj(url)
# # "http://www.0717nt.com/news.php?page=1"       旅游资讯 10页
# for i in range(1,11):
#     url = f'http://www.0717nt.com/news.php?page={i}'
#     print(url)
#     Prcj(url)
# # http://www.0717nt.com/hubeilvyou.php?page=1   湖北旅游    2页
# for i in range(1,3):
#     url = f'http://www.0717nt.com/hubeilvyou.php?page={i}'
#     print(url)
#     Prcj(url)
# # http://www.0717nt.com/yichanglvyou.php?page=1    宜昌旅游  2页
# for i in range(1,3):
#     url = f'http://www.0717nt.com/yichanglvyou.php?page={i}'
#     print(url)
#     Prcj(url)
# # http://www.0717nt.com/sanxiayoulun.php?page=1    三峡游轮 4页
# for i in range(1,5):
#     url = f'http://www.0717nt.com/sanxiayoulun.php?page={i}'
#     print(url)
#     Prcj(url)
# # http://www.0717nt.com/guoneilvyou.php?page=1    国内旅游  1页
# for i in range(1,2):
#     url = f'http://www.0717nt.com/guoneilvyou.php?page={i}'
#     print(url)
#     Prcj(url)
