import re
from urllib import request
import os
import requests
import json
from MysqlUtils import *
import pymysql

'''
Batch-insert into MySQL + pagination.
'''


class SpiderTestPagePage(object):
    """Crawl imooc course listing pages, extract course names with regexes,
    and batch-insert them into a MySQL table via MysqlUtils/pymysql."""

    # Database connection and cursor, created once at class-definition time
    # and shared by all instances (accessed through ``self`` below).
    mysql = MysqlUtils()
    conn = mysql.get_mysql_connection()
    cursor = conn.cursor()

    # Base listing URL; url2 is a sample of the paginated form.
    url = 'https://coding.imooc.com/'
    url2 = 'https://coding.imooc.com/?sort=0&unlearn=0&page=2'

    # Regular expressions for pulling data out of the page HTML.
    div_pattern = '<div class="shizhan-intro-box">([\s\S]*?)</div>'
    courses_name_pattern = '<p class="shizan-name" title=([\s\S]*?)</p>'
    imag_div_pattern = '<div class="img-box">([\s\S]*?)</div>'
    imag_pattern = '<div class="img-box">([\s\S]*?)</div>'
    name_pattern = 'class="">([\s\S]*?)</a>\\n'

    # Pagination counter (advanced by crawl()).
    offset = 0

    def crawl(self, params=None):
        """Build and return the list of paginated listing URLs.

        :param params: unused; kept for backward compatibility with callers
                       that passed query parameters.
        :return: list of URLs for pages 0..3 (NOTE(review): page=0 and page=1
                 may return the same listing — confirm against the site).
        """
        urls = []
        while self.offset < 4:
            urls.append('{}?sort=0&unlearn=0&page={}'.format(self.url, self.offset))
            self.offset += 1
        return urls

    def __fetch_content(self, url):
        """Download *url* and return the response body decoded as UTF-8 text."""
        # Context manager guarantees the HTTP response is closed (the original
        # leaked the connection object).
        with request.urlopen(url) as response:
            return str(response.read(), encoding='utf-8')

    def getImg(self, html):
        """Extract .jpg image URLs from *html* and download them to D:\\test.

        Bug fixes: the method now takes ``self`` (previously the instance was
        bound to ``html`` on instance calls), and the file-name counter is
        incremented per image so downloads no longer all overwrite ``0.jpg``.

        :param html: page source to scan.
        :return: list of matched image URLs.
        """
        reg = r'src="(.+?\.jpg)" pic_ext'
        # Collect every image address found in the page.
        imglist = re.findall(reg, html)
        path = 'D:\\test'
        # Create the target folder if it does not exist yet.
        if not os.path.isdir(path):
            os.makedirs(path)
        paths = path + '\\'
        for x, imgurl in enumerate(imglist):
            # Download each image and save it locally as <index>.jpg.
            request.urlretrieve(imgurl, '{}{}.jpg'.format(paths, x))
        return imglist

    def __analysis(self, htmls):
        """Extract course names from raw page HTML.

        First narrows to the intro boxes, then pulls the title fragments and
        strips them down to plain names.
        """
        root_html = re.findall(self.div_pattern, htmls)
        courses_namelist = re.findall(self.courses_name_pattern, str(root_html))
        return self.__data_assembly(courses_namelist)

    def __data_assembly(self, nameList):
        """Strip everything up to and including the first '>' from each raw
        title fragment, returning the bare course names."""
        names = []
        for oname in nameList:
            names.append(oname[oname.index('>') + 1:])
        return names

    def go(self):
        """Entry point: crawl every page, parse the names, and batch-insert
        them into the ``spidertest`` table."""
        urls = self.crawl()
        values = []
        for page_url in urls:
            html = self.__fetch_content(page_url)
            names = self.__analysis(html)
            print(values, end='')
            values += names
        try:
            # executemany batches one INSERT per extracted name.
            self.cursor.executemany('insert into spidertest(name) values(%s)', values)
            print('插入--:', len(values), '条数据', end='')
            self.conn.commit()
        except Exception as e:
            self.conn.rollback()
            # Bug fix: the original concatenated str + tuple ('...' + e.args),
            # which raised a TypeError inside the error handler itself.
            print('插入数据失败：' + str(e), end='')
        finally:
            # Always release DB resources, even if the insert failed.
            self.cursor.close()
            self.conn.close()

if __name__ == '__main__':
    # Run the spider only when executed as a script, so importing this
    # module does not trigger a crawl and a database insert.
    spidertestpage = SpiderTestPagePage()
    spidertestpage.go()
