#!/usr/bin/env python2
# -*- coding: UTF-8 -*-

# Copyright (c) 2011 Alexandre Défossez
# This file is part of HINAPT.
#
# HINAPT is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HINAPT is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HINAPT.  If not, see <http://www.gnu.org/licenses/>.

"""Crawler pour Google Image pour récupérer des images aléatoire pour
entraîner le classifieur Viola-Jones
Nécessite: (paquets Archlinux)
    - python2-gevent (disponible sur AUR pour Archlinux)
    - python-imaging
"""


# NOTE(review): ``import monkey`` looks wrong -- there is no stdlib or
# top-level gevent ``monkey`` module, so this raises ImportError unless a
# local monkey.py happens to sit next to this script.  The actual gevent
# monkey-patching is done on the ``from gevent import monkey`` line below.
# TODO: confirm and drop this import.
import monkey
import gevent
import gevent.pool
# Patch the stdlib socket module so blocking network calls yield to other
# greenlets instead of blocking the whole process.
from gevent import monkey; monkey.patch_socket()


import socket
import os
import os.path
import urllib2
import urllib
import re
import hashlib
import random
import urlparse
from cStringIO import StringIO

import Image


from BeautifulSoup import BeautifulSoup

# Word list used to build random Google Image queries, one word per line.
# splitlines() + the filter avoid the spurious empty entry that
# split("\n") produces when the file ends with a trailing newline (an
# empty word would send an empty query to Google).
_words_file = open("words")
try:
    words = [w for w in _words_file.read().splitlines() if w]
finally:
    _words_file.close()
# Monotonically increasing task id, used only to tag log output lines.
current_id = 0

# Browser-like User-Agent: Google serves a different (harder to parse)
# page to unknown clients.  Built with implicit string concatenation; the
# original used backslash continuations *inside* the literal, which
# embedded the source indentation (runs of spaces) into the header value.
USER_AGENT = ("Mozilla/5.0 (X11; U; Linux i686; fr; rv:1.9.0.8) "
              "Gecko/2009032711 Ubuntu/8.10 (intrepid) Firefox/3.0.8")

def open_url(url):
    """Open *url* spoofing a regular browser User-Agent.

    Returns the urllib2 response (file-like) object; network errors
    propagate to the caller (check() respawns the task in that case).
    """
    request = urllib2.Request(url, headers={'User-Agent': USER_AGENT})
    return urllib2.urlopen(request)

def check(g):
    if g.exception is not None or not g.value:
        print "Respawning",g
        pool.spawn(fetch_one)


def fetch_one():
    global current_id
    id = current_id
    current_id+= 1
    word = random.sample(words, 1)[0]
    print id,"getting word",word,"http://www.google.fr/images?"+urllib.urlencode({'q':word})
    p = open_url("http://www.google.fr/images?"+urllib.urlencode({'q':word}))
    r = BeautifulSoup(p.read())
    url = dict(r.meta.attrs)["content"].split(";",1)[1].split("=",1)[1]
    print id,"going to",url
    p = open_url(url)
    r = BeautifulSoup(p.read())
    c = r(id="ImgCont")[0]
    i = random.randrange(0, 5)
    j = random.randrange(0, 4)
    u = c.table.contents[2*i].contents[j].a.attrs[0][1]
    img_url = urlparse.parse_qs(u.split("?",1)[1])["imgurl"][0]
    ext = os.path.splitext(img_url)[1]
    if ext.lower() not in (".jpg", ".jpeg"):
        print id, "image discarded", img_url
        return False
    print id,"getting image",i,j, img_url
    p = open_url(img_url)
    d = []
    while True:
        c = p.read()
        if not c:
            break
        d.append(c)
    d = "".join(d)
    name = hashlib.md5(d).hexdigest()+".jpg"
    f = StringIO(d)
    out = StringIO()
    Image.open(f).convert("L").save(out, "jpeg")
    open("img/"+name, "w").write(out.getvalue())
    print id,"saved to", name
    return True

def main():
    """Fill img/ up to N_IMG images using a pool of 4 crawler greenlets."""
    global pool
    N_IMG = 100
    pool = gevent.pool.Pool(4)
    if not os.path.exists("img"):
        os.mkdir("img")
    # Only fetch what is missing: images already on disk count toward the
    # target, and check() respawns any task that fails.
    already_there = len(os.listdir("img"))
    for _ in range(already_there, N_IMG):
        worker = pool.spawn(fetch_one)
        worker.link(check)
    pool.join()


# Script entry point.
if __name__ == '__main__': main()
