#!/usr/bin/python3
# -*- coding: utf-8; tab-width: 4; indent-tabs-mode: t -*-

import os
import re
import sys
import time
import json
import socket
import shutil
import subprocess
import lxml.html
import http.client
import urllib.error
import urllib.request
from datetime import datetime

import mirrors.plugin


class Main:
    """Mirror plugin for openclipart.org.

    NOTE(review): _downloadFromBing and the private helpers below look
    copied from a Bing-wallpaper mirror plugin: they reference
    self._processDate and self.__isInChinaMainland, neither of which is
    defined in this class, so that path cannot currently run.  run() only
    fetches the page count and downloads nothing — confirm intent before
    finishing the implementation.
    """

    def __init__(self, sock):
        # Search endpoint; %d is the 1-based result-page number.
        self.url = "https://openclipart.org/search?p=%d"

        # JSON schema of one clipart record,
        # from https://openclipart.org/api-docs/#
        self.schema = {
            "type": "object",
            "properties": {
                "id": {
                    "type": "integer",
                },
                "title": {
                    "type": "string",
                },
                "description": {
                    "type": "string",       # fixed typo: was "sring"
                },
                "tags": {
                    "type": "array",
                    "items": {
                        "type": "string",
                    },
                },
                "filename": {
                    "type": "string",
                },
                "filesize": {
                    "type": "integer",
                },
                "author": {
                    "type": "string",
                },
                # NOTE(review): "nsfs" is probably a typo for "nsfw" — key
                # kept as-is in case consumers depend on it; confirm upstream.
                "nsfs": {
                    "type": "boolean",
                },
                "sha1": {
                    "type": "string",
                },
                "created_at": {
                    "type": "string",
                    "format": "date-time",
                },
                "edited_at": {
                    "type": "string",
                    "format": "date-time",
                },
                "svg_file": {
                    "type": "string",
                    "format": "uri",
                },
                "url": {
                    "type": "string",
                    "format": "uri",
                },
                "thumbnails": {
                    "type": "object",
                    "properties": {
                        "small": {
                            "type": "string",
                            "format": "uri",
                        },
                        "medium": {
                            "type": "string",
                            "format": "uri",
                        },
                        "large": {
                            "type": "string",
                            "format": "uri",
                        },
                    },
                },
            },
        }

        self.sock = sock        # plugin API client used for progress/error callbacks
        self.dataDir = mirrors.plugin.params["storage-file"]["data-directory"]
        self.p = InfoPrinter()  # tab-indented console logger
        self.progress = 0       # overall progress percentage (0-100)

    def run(self):
        """Plugin entry point.

        Currently only determines the total number of search pages; the
        download loop is not implemented yet, so the result is unused.
        """
        maxPage = self._getMaxPage()

    def _getMaxPage(self):
        """Scrape page 1 of the search results and return the total page count.

        The count is read from an anchor whose text looks like "1 / 123".
        Raises Exception when no such anchor is found.
        """
        resp = urllib.request.urlopen(self.url % (1), timeout=60)
        root = lxml.html.parse(resp)
        for atag in root.xpath(".//a"):
            if atag.text is None:
                continue        # bug fix: anchors wrapping images have no text; re.fullmatch(None) raises TypeError
            m = re.fullmatch("[0-9]+ / ([0-9]+)", atag.text)
            if m is not None:
                return int(m.group(1))

        raise Exception("get max page failed")

    def _downloadFromBing(self, endProgress):
        """Fetch the daily wallpaper archive from Bing for every market.

        NOTE(review): leftover from a Bing-wallpaper plugin; calls
        self._processDate and self.__isInChinaMainland which are not defined
        in this class — this method will fail if invoked.
        """
        bingUrl = "https://bing.com"
        bingImagePath = "/HPImageArchive.aspx?format=js&idx=0&n=8&uhd=1&mkt=%s"       # format: json; start index: 0, fetch count: 8

        self.p.print("Downloading from %s" % (bingUrl))
        self.p.incIndent()
        try:
            if self.__isInChinaMainland():
                marketList = [
                    'zh-CN',
                ]
            else:
                marketList = [
                    'ar-XA', 'da-DK',
                    'de-AT', 'de-CH', 'de-DE',
                    'en-AU', 'en-CA', 'en-GB', 'en-ID', 'en-IE', 'en-IN', 'en-MY', 'en-NZ', 'en-PH', 'en-SG', 'en-US', 'en-WW', 'en-XA', 'en-ZA',
                    'es-AR', 'es-CL', 'es-ES', 'es-MX', 'es-US', 'es-XL',
                    'et-EE', 'fi-FI',
                    'fr-BE', 'fr-CA', 'fr-CH', 'fr-FR',
                    'he-IL', 'hr-HR', 'hu-HU', 'it-IT', 'ja-JP', 'ko-KR', 'lt-LT', 'lv-LV', 'nb-NO',
                    'nl-BE', 'nl-NL',
                    'pl-PL',
                    'pt-BR', 'pt-PT',
                    'ro-RO', 'ru-RU', 'sk-SK', 'sl-SL', 'sv-SE', 'th-TH', 'tr-TR', 'uk-UA',
                    'zh-CN', 'zh-HK', 'zh-TW',
                ]

            for market in marketList:
                self.p.print("Processing market %s" % (market))
                self.p.incIndent()
                try:
                    ret = self.__getJson(bingUrl + bingImagePath % (market))
                    for image in ret["images"]:
                        curDate = datetime.strptime(image["enddate"], "%Y%m%d")
                        imgUrl = bingUrl + re.sub("w=[0-9]+&h=[0-9]+", "", image["url"])    # remove w= and h=, so that we download the maximum resolution
                        self._processDate(market, curDate, imgUrl, {
                            "title": image["title"],
                            "url": imgUrl,
                            "copyright": image["copyright"],
                            "wp": image["wp"],                      # wp == false when background is animated
                            "hsh": image["hsh"],
                        })
                finally:
                    self.p.decIndent()
        finally:
            self.p.decIndent()

        self.sock.progress_changed(endProgress)

    def __getJson(self, url):
        """Fetch *url* and parse the response as JSON, retrying on transient errors.

        A 404 is treated as permanent and re-raised; every other network
        failure sleeps 10 seconds and retries forever.
        """
        while True:
            try:
                return json.load(urllib.request.urlopen(url))
            except urllib.error.HTTPError as e:
                if e.code == 404:       # permanent: the resource does not exist
                    raise
                self.p.print("urlopen failed and try again: %s" % str(e))
                time.sleep(10.0)
            # HTTPError is a subclass of URLError, so it must be caught above
            # before this consolidated transient-error handler.
            except (socket.timeout, http.client.HTTPException, urllib.error.URLError) as e:
                self.p.print("urlopen failed and try again: %s" % str(e))
                time.sleep(10.0)

    def __getStandardBingUrl(self, url):
        """Normalize any bing.com scheme/mirror variant to https://bing.com/."""
        return re.sub(r"https?://.*bing\.com/", "https://bing.com/", url)

    def __compareMetadataJson(self, fullfn, newJson):
        """Compare the stored metadata file *fullfn* against *newJson*.

        Returns 0 when a key matches on both sides or is absent from both,
        -1 when it exists only in the old file, 1 when it exists only in the
        new dict, and raises MetadataJsonCompareError when a shared key's
        values differ.

        NOTE(review): every branch returns on the FIRST key of keyList, so
        only "title" is ever examined — this looks like a bug; confirm
        whether the loop was meant to continue across the remaining keys.
        """
        oldJson = None
        with open(fullfn) as f:
            oldJson = json.load(f)

        keyList = ["title", "url", "copyright", "wp", "hsh"]
        for key in keyList:
            if key not in newJson:
                if key not in oldJson:
                    return 0
                else:
                    return -1
            else:
                if key not in oldJson:
                    return 1
                else:
                    if newJson[key] != oldJson[key]:
                        raise MetadataJsonCompareError(key, oldJson[key], newJson[key])
                    return 0

    def __downloadFile(self, url, localFile):
        """Download *url* to *localFile* via wget, retrying every 60 seconds.

        wget exit code 8 (server issued an error response, e.g. 404) is
        treated as permanent and re-raised.
        """
        while True:
            try:
                subprocess.check_call(["wget", "--quiet", "--no-check-certificate", "-O", localFile, url])      # always re-dowloand
                break
            except subprocess.CalledProcessError as e:
                if e.returncode == 8:       # not found
                    raise
                self.p.print("download failed and try again: %s" % str(e))
                time.sleep(60)


class MetadataJsonCompareError(Exception):
    """Raised when a metadata key's value differs between old and new JSON."""

    def __init__(self, key, oldValue, newValue):
        super().__init__()
        # Keep the conflicting key and both values for the caller to inspect.
        self.key, self.oldValue, self.newValue = key, oldValue, newValue


class InfoPrinter:
    """Console logger that prefixes each message with one tab per nesting level."""

    def __init__(self):
        self.indent = 0     # current nesting depth

    def incIndent(self):
        """Go one level deeper."""
        self.indent += 1

    def decIndent(self):
        """Go one level back up; depth must stay non-negative."""
        assert self.indent > 0
        self.indent -= 1

    def print(self, s):
        """Print *s* indented by the current depth."""
        print("\t" * self.indent + s)


class Util:
    """File-system helpers that never raise on already-missing paths."""

    @staticmethod
    def forceDelete(path):
        """Remove *path* whatever it is: symlink, file, directory, or device node.

        Does nothing when the path does not exist.
        """
        # A real directory (not a symlink to one) needs a recursive delete;
        # everything else that exists — links, files, device nodes — is a
        # single os.remove.  lexists() catches broken symlinks too.
        if os.path.isdir(path) and not os.path.islink(path):
            shutil.rmtree(path)
        elif os.path.lexists(path):
            os.remove(path)

    @staticmethod
    def forceMakeDir(path):
        """Ensure *path* is a directory, replacing anything else in the way."""
        if os.path.isdir(path):
            return
        if os.path.exists(path):
            # Something non-directory occupies the name: remove and recreate.
            Util.forceDelete(path)
            os.mkdir(path)
        else:
            os.makedirs(path)

    @staticmethod
    def forceClearDir(path):
        """Delete every entry directly inside the directory *path*."""
        for entry in os.listdir(path):
            Util.forceDelete(os.path.join(path, entry))


###############################################################################

if __name__ == "__main__":
    with mirrors.plugin.ApiClient() as sock:
        try:
            app = Main(sock)
            app.run()
            sock.progress_changed(100)      # report completion to the plugin host
        except Exception:
            # Report the failure to the plugin host, then re-raise so the
            # process still exits with a traceback.
            sock.error_occured(sys.exc_info())
            raise










from bs4 import BeautifulSoup
import requests
import datetime
from pymongo import MongoClient, UpdateOne
import sys

# openclipart.org endpoints used by the scraper below.
URL_BYDATE = "https://openclipart.org/bydate?page="
URL_BASE = "https://openclipart.org"
URL_DOWNLOAD = "https://openclipart.org/download"


# MongoDB connection information (MONGO_USER, MONGO_PASSWORD, MONGO_HOST,
# MONGO_PORT).  NOTE(review): the star import presumably also supplies the
# updatePage() called in __main__ — confirm against mongodb_info.
from mongodb_info import * 

# Fallback total page count; refreshed at runtime in __main__.
MAX_PAGE = 4168

# Accumulated scrape failures, reported at the end of the run.
error = []

#connect to mongodb, return None if connection failure
def getDB():
	try:
		client = MongoClient('mongodb://%s:%s@%s:%s/edudata' % (MONGO_USER, MONGO_PASSWORD, MONGO_HOST, MONGO_PORT))
		client.server_info()
		db = client.edudata
		return db.openclipart
	except Exception as e:
		print "Unexpected error:", e
		return None


#execute queries collecting from a page
def insertDB( openclipart, query):
	if query is not None:
		result = openclipart.bulk_write(query, ordered = False)
		print result.bulk_api_result



#get data from a page and turn them into queries
def getData(pageNum):
	try:
		res = requests.get(URL_BYDATE + str(pageNum));	
		soup = BeautifulSoup(res.text, "lxml");
		content = soup.body.find(id="bydate");
		if content is None:
			raise Exception("Null content")
	except Exception as e:
		print "Can't get page ", pageNum,":",e
		error.append({"url":URL_BYDATE + str(pageNum), 'type':'get page', 'Exception': e})
		return None

	update_queries = []

	for element in content.find_all(attrs={"class" : "r-img"}):
		try:
			url = URL_BASE + element.a["href"]
			res2 = requests.get(url);	
			soup2 = BeautifulSoup(res2.text,"lxml");
			content2 = soup2.body.find(id="view");

			data = {}
			data['_id'] = element.a["href"].split("/", 2)[2];
			if content2.h2 is not None:
				data['title'] = content2.h2.string;
			else:
				data['title'] = ""
			data['creator'] = content2.find(id = 'viewauthor').find(attrs = {"itemprop" : "name"}).string;
			data['createdate'] = content2.find(attrs={"itemprop" : "datePublished"})["content"]
			data['description'] = content2.find(id = "description").text;
			data['url'] = URL_BASE + content2.find(id="viewimg").a["href"]
			data['keyword'] = []
			pointer = content2.find(id="viewtags")
			for k in pointer.span.find_all('a'):
				data['keyword'].append(k.text);
			pointer = pointer.next_sibling.next_sibling.next_sibling.next_sibling
			data['views'] = int(pointer.text.split(' ')[0])
			pointer = pointer.next_sibling.next_sibling.next_sibling.next_sibling
			data['good'] = int(pointer.text.split(' ')[0])
			pointer = pointer.next_sibling.next_sibling.next_sibling.next_sibling
			data['filesize'] = long(pointer.text.split(' ')[0])
			currentTime = datetime.datetime.utcnow()
			data['update_at'] = currentTime
			update_queries.append(UpdateOne({'_id':data['_id']}, {'$set': data, '$setOnInsert':{'created_at':currentTime}},True))
		except Exception as e:
			print "Failed to get data %s, Exception:" % url , e
			error.append({'Error url' :url, 'type':'get data', 'Exception': e})

	return update_queries
		


if __name__ == '__main__':
    db = getDB()
    if db is None:
        print("No db connected")
        sys.exit(1)     # bug fix: bare `exit` was a no-op expression, the script kept running with db=None

    # refresh MAX_PAGE from the live site
    # NOTE(review): updatePage is not defined in this file; it presumably
    # comes from the `mongodb_info` star import — confirm.
    MAX_PAGE = updatePage(1)

    # Optional CLI args: start page and end page (inclusive).
    from_page = 1
    to_page = MAX_PAGE
    if len(sys.argv) > 1:
        from_page = int(sys.argv[1])
    if len(sys.argv) > 2:
        to_page = int(sys.argv[2])

    for i in range(from_page, to_page + 1):
        if i > MAX_PAGE:
            print("Meet max page", MAX_PAGE)
            break
        insertDB(db, getData(i))
        print("Page", i, "Done")

    # bug fix: the original ended with a dangling `print "Error log:",`
    # (py2 trailing comma) and never showed the collected errors.
    print("Error log:", error)