import stat
import os
import re
from urllib import request

from MysqlUtils import *

'''
Batch insert into MySQL + pagination + image download
'''


class SpiderTestPagePage(object):
    """Crawl the imooc course listing, extract course image URLs and
    download the images to a local folder.

    The MySQL insertion code that originally lived in :meth:`go` has been
    removed as dead (commented-out) code; the connection/cursor are still
    created at class-definition time for backward compatibility.
    """

    # NOTE(review): because of the ``global`` declaration, the assignments
    # below create *module-level* names ``conn`` and ``cursor`` — they are
    # NOT class attributes and cannot be reached via ``self.conn``.
    # ``mysql`` (no global declaration) IS a regular class attribute.
    global conn, cursor
    mysql = MysqlUtils()
    conn = mysql.get_mysql_connection()
    # Cursor for the (currently unused) batch-insert code.
    cursor = conn.cursor()

    # Listing-page base URL and a sample paginated URL.
    url = 'https://coding.imooc.com/'
    url2 = 'https://coding.imooc.com/?sort=0&unlearn=0&page=2'

    # Scraping regexes. Raw strings so ``\s``/``\S`` reach the regex engine
    # intact (non-raw ``'\s'`` is an invalid string escape, deprecated since
    # Python 3.6). The resulting pattern text is byte-identical to before.
    div_pattern = r'<div class="shizhan-intro-box">([\s\S]*?)</div>'
    courses_name_pattern = r'<p class="shizan-name" title=([\s\S]*?)</p>'
    imag_div_pattern = r'<div class="img-box">([\s\S]*?)</div>'
    imag_pattern = r'src="([\s\S]*?)" />'
    div_image_pattern = r'<div class="img-box">([\s\S]*?)</div>'
    # Two capture groups: (1) alt text, (2) image src — __analysis uses (2).
    course_img_pattern = r'<img class="shizhan-course-img" alt=([\s\S]*?) src="([\s\S]*?)" />'
    name_pattern = r'class="">([\s\S]*?)</a>\n'

    # Pagination cursor. crawl() advances it, so a second call on the same
    # instance returns an empty list.
    offset = 0

    def crawl(self, params=None):
        """Return the listing-page URLs still to be crawled.

        With the current limit (``offset < 1``) only the ``page=0`` URL is
        produced. ``params`` is unused and kept for interface compatibility.

        :return: list of page URL strings (possibly empty).
        """
        urls = []
        while self.offset < 1:
            urls.append('{}?sort=0&unlearn=0&page={}'.format(self.url, self.offset))
            self.offset += 1
        return urls

    def __fetch_content(self, url):
        """Download *url* and return the response body decoded as UTF-8.

        The response is closed even if reading/decoding fails (the original
        leaked the connection).
        """
        response = request.urlopen(url)
        print(response)
        try:
            return str(response.read(), encoding='utf-8')
        finally:
            response.close()

    def getImg(self, imglist):
        """Download every image URL in *imglist* to ``D:/python_test``.

        Files are named ``0<ext>``, ``1<ext>``, ... where ``<ext>`` is taken
        from the URL. The directory is created if missing.

        :param imglist: iterable of absolute image URLs.
        :return: *imglist*, unchanged.
        """
        file_path = 'D:/python_test'
        if not os.path.isdir(file_path):
            os.makedirs(file_path)
        for index, image_url in enumerate(imglist):
            file_suffix = os.path.splitext(image_url)[1]
            print(file_suffix)
            # Sequential file name: <dir><sep><index><suffix>.
            file_name = '{}{}{}{}'.format(file_path, os.sep, index, file_suffix)
            print(file_name)
            # Fetch the image and write it to the local folder.
            request.urlretrieve(image_url, filename=file_name)
        return imglist

    def __analysis(self, htmls):
        """Extract course image URLs from the page HTML.

        ``course_img_pattern`` yields ``(alt, src)`` tuples; only the ``src``
        (second group) is kept.
        """
        src_images = re.findall(self.course_img_pattern, htmls)
        return [alt_and_src[1] for alt_and_src in src_images]

    def __data_assembly(self, nameList):
        """Strip the leading ``...>`` prefix from each raw regex match and
        return the bare course names.

        :raises ValueError: if an entry contains no ``'>'``.
        """
        return [oname[oname.index('>') + 1:] for oname in nameList]

    def go(self):
        """Entry point: crawl the listing pages, collect image URLs and
        download the images."""
        values = []
        for page_url in self.crawl():
            root_html = self.__fetch_content(page_url)
            name_list = self.__analysis(root_html)
            print(values, end='')
            values += name_list
        # Turn protocol-relative URLs into absolute ones, then download.
        image_list = self.image_url_assembly(values)
        image = self.getImg(image_list)
        print(image)
        print("t图片地址：", values)

    def image_url_assembly(self, values):
        """Prefix each protocol-relative URL (``//host/...``) with ``http:``
        and return the resulting absolute URLs."""
        return ['http:' + value for value in values]

# Run the spider only when executed as a script, not on import.
if __name__ == '__main__':
    spidertestpage = SpiderTestPagePage()
    spidertestpage.go()
