# import libraries
import requests
import re
import os


# define class

class PetSpider():
    """Crawl a paginated gallery site: save each page's HTML into
    ``html_path``, extract image URLs from the saved pages into
    ``url_path/urls.txt``, then download the images into ``image_path``.
    """

    def __init__(self, image_path, html_path, url_path):
        # Directory for downloaded images (img_<n>.jpg).
        self.image_path = image_path
        # Directory for raw saved HTML pages (web_page_<n>.txt).
        self.html_path = html_path
        # Directory for the extracted urls.txt file.
        self.url_path = url_path

    # get webpages
    def get_html_content(self, url, url_prefix):
        """Download ``url`` and every page reachable through the site's
        "next" link (relative href, joined with ``url_prefix``), saving
        each page's raw bytes to ``html_path``.

        Iterative rather than recursive so a long pagination chain
        cannot hit Python's recursion limit.
        """
        while url:
            r = requests.get(url, timeout=60)
            page_content = r.content.decode('utf-8')
            page_div = re.compile(r'<div class="page">(.*?)</div>',
                                  re.I | re.S | re.M).findall(page_content)
            if not page_div:
                print('page_div not found')
                return
            current_page = re.compile(
                r'<a class ="current" href="javascript:void\(0\);">(.*?)</a>',
                re.I | re.S | re.M).findall(page_div[0])
            if current_page:
                html_file_name = '%s/web_page_%s.txt' % (self.html_path, current_page[0])
                print('downloading %s' % (html_file_name))
                # `with` guarantees the handle closes even if write fails.
                with open(html_file_name, "wb") as f:
                    f.write(r.content)
            else:
                # fixed message typo: was "not fount"
                print('current_page not found')
            next_url = re.compile(r'<a class="next" href="(.*?)">&raquo;</a>',
                                  re.I | re.S | re.M).findall(page_div[0])
            if next_url:
                url = url_prefix + next_url[0]
            else:
                if current_page:
                    print('download over')
                else:
                    print('next_url not found')
                return

    # get urls from webpages
    def get_urls(self):
        """Scan the saved HTML files for ``<img src="..." />`` tags and
        write the extracted URLs, one per line, to ``url_path/urls.txt``.
        """
        file_list = []  # stays empty if html_path does not exist / is empty
        for _, _, file_list in os.walk(self.html_path):
            break  # only the files directly inside html_path
        file_count = len(file_list)
        # Compile once outside the loop instead of per file.
        img_pattern = re.compile(r'<img src="(.*?)" />', re.I | re.S | re.M)

        url_lines = []
        # enumerate() keeps the progress counter correct for every file,
        # including files where no url matched.
        for counter, entry in enumerate(file_list, start=1):
            file_name = '%s/%s' % (self.html_path, entry)
            print('extracting urls from %s. %d/%d' % (file_name, counter, file_count))
            with open(file_name, "rb") as f:
                page_content = f.read().decode('utf-8')
            urls = img_pattern.findall(page_content)
            if urls:
                for u in urls:
                    # Skip captures that swallowed an alt attribute
                    # (the non-greedy match runs past src when alt=... follows).
                    if u.find('alt') >= 0:
                        continue
                    url_lines.append(u)
            else:
                # BUG FIX: this was attached to the for-loop's `else`, so it
                # printed after every file that DID have urls; it belongs to
                # the "no match in this file" case.
                print('url not found')

        # write urls into file
        with open('%s/urls.txt' % (self.url_path), "wb") as f:
            f.write(''.join(u + '\n' for u in url_lines).encode('utf8'))

    # download images
    def download_images(self):
        """Download every URL listed in urls.txt into ``image_path`` as
        img_<n>.jpg; failed downloads are reported and skipped.
        """
        with open('%s/urls.txt' % (self.url_path)) as f:
            # BUG FIX: urls.txt ends with '\n', so split("\n") yields a
            # trailing empty string; filtering avoids a bogus
            # requests.get('') "exception. skip." round per run.
            rows = [line for line in f.read().split("\n") if line.strip()]
        current_image_number = 1
        urls_count = len(rows)

        for url in rows:
            try:
                # try to download the image
                r = requests.get(url, timeout=60)
                # save the image to disk
                image_file = '%s/img_%d.jpg' % (self.image_path, current_image_number)
                with open(image_file, "wb") as f:
                    f.write(r.content)
                print('download %s. %d/%d' % (image_file, current_image_number, urls_count))
                current_image_number += 1
            except Exception:  # narrowed from bare except: don't swallow KeyboardInterrupt
                print('%s has an exception. skip.' % (url))


