#!/usr/bin/env python
# -*- encoding:utf-8 -*-

import sys
import time
import re
import urllib
import random
import json
import requests
from bs4 import BeautifulSoup
from xproxy import ProxyBox as PB
from xproxy import ProxyTool as PT
import logging
# Python-2-only hack: force the process-wide default encoding to UTF-8 so
# implicit str<->unicode conversions of scraped pages don't raise
# UnicodeDecodeError.  reload() is required because site.py removes
# sys.setdefaultencoding at interpreter startup.
reload(sys)
sys.setdefaultencoding('utf-8')

# Log everything at INFO+ to log.txt; filemode='w' truncates the log on
# every run.
fmt = '%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s'
logging.basicConfig(level=logging.INFO,
                    format=fmt,
                    datefmt='%a, %d %b %Y %H:%M:%S',
                    filename='log.txt',
                    filemode='w')


class Dict(dict):
    """A dict whose keys can also be read/written/deleted as attributes.

    Reading a missing attribute yields ``None`` instead of raising, and
    deleting a missing attribute is silently ignored.
    """

    def __getattr__(self, key):
        # dict.get() gives exactly the "missing key -> None" semantics.
        return self.get(key)

    def __setattr__(self, key, value):
        self[key] = value

    def __delattr__(self, key):
        # pop with a default never raises, matching the forgiving delete.
        self.pop(key, None)

    def __repr__(self):
        return '<Data: ' + dict.__repr__(self) + '>'


def has_class_but_no_id_p(tag):
    """BeautifulSoup filter: match <p> tags carrying a class but no id."""
    if tag.name != 'p':
        return False
    return tag.has_attr('class') and not tag.has_attr('id')


class ErrGpage(Exception):
    """Raised when a Google result page is empty or unparsable.

    BUG FIX: this class is used with ``raise ErrGpage(...)`` elsewhere in
    the file, so it must derive from Exception — raising a plain class
    fails on Python 3 and is deprecated on Python 2.
    """

    def __init__(self, msg='Page error'):
        super(ErrGpage, self).__init__(msg)
        self.msg = msg


class RobotCheck(Exception):
    """Raised when Google serves its reCAPTCHA "are you a robot" page.

    BUG FIX: this class is raised and caught (``except RobotCheck as e``)
    elsewhere in the file, so it must derive from Exception.

    Attributes:
        msg: human-readable description.
        url: the reCAPTCHA challenge URL, when known.
    """

    def __init__(self, msg='Robot check', recaptcha_url=''):
        super(RobotCheck, self).__init__(msg)
        self.msg = msg
        self.url = recaptcha_url


class Gpage(object):
    """One parsed Google results page.

    Wraps the raw HTML in BeautifulSoup (html5lib parser) and eagerly
    extracts organic results, related searches, result statistics and the
    set of cited domains.

    Raises:
        ErrGpage: when ``html`` is empty.
        RobotCheck: when the page is Google's reCAPTCHA interstitial.
    """

    def __init__(self, html):
        # BUG FIX: the original tested self.html before ever assigning it,
        # which raised AttributeError instead of the intended ErrGpage.
        if not html:
            raise ErrGpage('Page empty')
        self.html = BeautifulSoup(html, 'html5lib')
        if self.in_robot_check():
            raise RobotCheck()
        # The full desktop layout carries a class on <body>; the bare
        # fallback layout does not and is parsed differently below.
        # ROBUSTNESS: guard against a missing <body>.
        body = self.html.body
        self.body_style = body.get('class') if body else None
        self.results_html = self.results_content()
        self.relates_html = self.relates_content()
        self.nav = self.nav_content()
        self.relates = self.parse_relate()
        self.results = self.parse_item()
        self.stats = self.parse_stats()
        self.cites = self.parse_cites()

    def results_content(self):
        """Return the tags holding the organic results for this layout."""
        if not self.body_style:
            # bare layout: each result is a plain <p>
            return self.html.find_all('p')
        return self.html.find_all('div', {'class': 'g'})

    def relates_content(self):
        """Return related-search candidate tags, or None in the bare layout."""
        if not self.body_style:
            return None
        return self.html.find_all(has_class_but_no_id_p)

    def nav_content(self):
        """Return the pagination <table id="nav">, or None."""
        return self.html.find('table', id='nav')

    def parse_cites(self):
        """Return the set of domains appearing in <cite> elements."""
        texts = [c.get_text() for c in self.html.find_all('cite')]
        return set([self.extractDomain(t) for t in texts if t])

    # extract domain from url
    def extractDomain(self, url):
        """Return the host part of ``url``; '' when no '/' follows the host."""
        domain = ''
        pattern = re.compile(r'(http[s]?://)?([^/]+)/', re.U | re.M)
        url_match = pattern.search(url)
        if url_match and url_match.lastindex > 1:
            domain = url_match.group(2)

        return domain

    # extract a url from a link
    def extractUrl(self, href):
        """Pull the target URL out of a Google redirect href (...q=<url>&...)."""
        url = ''
        pattern = re.compile(r'q=((http[s]?://)?[^&]+)&', re.U | re.M)
        url_match = pattern.search(href)
        if url_match and url_match.lastindex > 0:
            url = url_match.group(1)

        return url

    def parse_item(self):
        """Parse every result into {domain: {url: {'title':..., 'time':...}}}."""
        item = Dict(title='', url='', time='')
        ret = {}
        num = 1
        for i in self.results_html:
            logging.debug('[*] Parse item %d' % num)
            try:
                item.title = i.h3.a.get_text()
                url = self.extractUrl(i.h3.a['href'])
                item.url = urllib.unquote(url)
                spf = i.find('span', 'f')
                item.time = spf.string if spf else ''
                u = self.extractDomain(item.url)
                logging.debug('[*] Get item: %s' % item)
                # copy() because the same Dict instance is reused per loop
                if u not in ret:
                    url = item.pop('url')
                    ret[u] = {url: item.copy()}
                elif item.url not in ret[u]:
                    url = item.pop('url')
                    ret[u].update({url: item.copy()})
                num += 1
            except Exception as e:
                # tolerate ads / unusual result markup and keep parsing
                logging.exception(e)
                logging.debug(i)
                continue

        del item
        return Dict(ret)

    def parse_relate(self):
        """Return the related-search strings, or None in the bare layout."""
        # BUG FIX: the original tested the *method* self.relates_content
        # (always truthy) instead of self.relates_html, then crashed
        # iterating None whenever the bare layout was served.
        if not self.relates_html:
            return None
        return [i.a.get_text() for i in self.relates_html if i.a]

    def has_next(self):
        """Return True when the page links to a further results page."""
        if not self.nav:
            logging.warning('No page nav')
            logging.info('Try to get <strong>Next</strong>')
            _next = self.html.find('strong', text='Next')
            logging.debug('Get %s' % _next)
            return True if _next else False
        return self.nav.find(text='Next') is not None

    def parse_stats(self):
        """Return the 'About N results' text, or ''."""
        stats = self.html.find(id='resultStats')
        return stats.get_text().strip() if stats else ''

    def current(self):
        """Return the highlighted current page number from the nav table."""
        return self.nav.find('td', 'cur').get_text()

    def in_robot_check(self):
        """Return True when the page is the reCAPTCHA interstitial."""
        return self.html.find('div', {'id': 'recaptcha'}) is not None

    def get_recaptcha(self):
        # TODO: fetch/solve the reCAPTCHA challenge — not implemented
        pass


class ParseGpage(object):
    """Context manager: parse one result page and append the accumulated
    relates/cites/results to ``results.txt`` as JSON on exit.

    NOTE(review): the accumulators are per-instance, so the merge logic in
    __enter__ only matters if an instance is reused — no caller is visible
    in this file; confirm intended usage.
    """

    def __init__(self, html):
        self.html = html
        self.relates = []
        self.cites = []
        self.result_set = {}

    def __enter__(self):
        if not self.html:
            raise ErrGpage('Page empty')
        # Gpage.__init__ raises RobotCheck itself for the captcha page,
        # so no separate in_robot_check() call is needed here.
        page = Gpage(self.html)
        # ROBUSTNESS: page.relates may be None for the bare layout
        self.relates += page.relates or []
        self.cites += page.cites
        for domain, hits in page.results.iteritems():
            if domain not in self.result_set:
                self.result_set[domain] = hits
            else:
                self.result_set[domain].update(hits)
        # BUG FIX: __enter__ must return the object bound by 'with ... as';
        # the original returned None.
        return self

    def __exit__(self, exc_type, exc_value, tb):
        logging.debug('type: %s' % exc_type)
        logging.debug('value: %s' % exc_value)
        if tb is not None:
            # BUG FIX: logging.exception() is only meaningful inside an
            # except block; just record the traceback object at debug level.
            logging.debug('traceback: %s' % tb)
        self.relates = list(set(self.relates))
        self.cites = list(set(self.cites))
        self.ret = dict(relates=self.relates,
                        cites=self.cites,
                        results=self.result_set)
        # BUG FIX: the original called self.json.dump (AttributeError —
        # json is the module, not an attribute) and leaked the file handle.
        with open('results.txt', 'a') as fp:
            json.dump(self.ret, fp, ensure_ascii=False)


class Google(object):
    """Fetches and aggregates Google web-search result pages, rotating
    User-Agents and proxies (from xproxy) to dodge rate limiting."""

    BASE_URL = 'https://www.google.com'
    PAGE_COUNT = 10   # results per page (the 'num' query parameter)
    UA_LIST = []      # candidate User-Agent strings; see load_user_agent()
    LANGUAGE = 'en'   # interface language (the 'hl' query parameter)

    def __init__(self):
        self.html = ''
        # default UA used until randomUA() picks one from UA_LIST
        self.ua = 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:52.0) Gecko/20100101 Firefox/52.0'
        self._prev = None
        self._next = None
        # keep only the proxies the pool rates 'excellent'
        self.pbox = [p['Proxy'] for p in PB().get('excellent')]
        self.proxies = None

    def load_user_agent(self, uafile='./user_agents'):
        """Load User-Agent strings from ``uafile``, one per line.

        ROBUSTNESS: blank lines are skipped so randomUA() cannot pick ''.
        """
        logging.info('[*] Loading UA list...')
        with open(uafile, 'r') as fp:
            self.UA_LIST = [ua.strip() for ua in fp if ua.strip()]
        logging.info('[+] Loaded %d UA' % len(self.UA_LIST))

    def randomSleep(self):
        """Sleep 60-120 s to mimic a human pause between requests."""
        sleeptime = random.randint(60, 120)
        logging.info('[*] Sleep %d secs...' % sleeptime)
        time.sleep(sleeptime)

    def randomUA(self):
        """Pick a random User-Agent from UA_LIST for subsequent requests."""
        self.ua = random.choice(self.UA_LIST)
        logging.info('[*] Random choice UA: %s' % self.ua)

    def randomProxy(self):
        """Pick a random proxy from the pool, converted to requests form."""
        self.proxies = random.choice(self.pbox)
        logging.info('[*] Random choice proxy: %s' % self.proxies)
        self.proxies = PT.proxy2dict(self.proxies)

    def getpage(self, query, start=0, proxy=None):
        """Fetch one results page.

        Args:
            query: search terms.
            start: zero-based index of the first result to request.
            proxy: optional requests-style proxies dict; defaults to the
                instance's current proxy.

        Returns:
            The page HTML (also mirrored to self.html and debug.html).
        """
        params = dict(hl=self.LANGUAGE,
                      num=self.PAGE_COUNT,
                      start=start,
                      q=query)
        logging.debug(json.dumps(params))
        logging.debug('[*] Get html page...')
        r = requests.get(self.BASE_URL + '/search',
                         params=params,
                         headers={'User-agent': self.ua,
                                  'Accept-Encoding': 'gzip',
                                  'referer': self.BASE_URL},
                         proxies=proxy or self.proxies,
                         # BUG FIX: with no timeout, a dead proxy hangs
                         # the whole crawl forever
                         timeout=30)
        logging.debug(r.status_code)
        self.html = r.text
        with open('debug.html', 'w') as f:
            f.write(self.html)

        return r.text

    # search web
    # @param query -> query key words
    # @param start_page -> 1-based page to start from
    # @param page_num -> number of result pages to fetch and merge
    def search(self, query, start_page=1, page_num=1, proxy=None):
        """Search and aggregate up to ``page_num`` pages of results.

        Returns:
            dict(relates=[...], cites=[...],
                 results={domain: {url: {'title':..., 'time':...}}})
        """
        result_set = {}
        cites = []
        relates = []
        num = 0
        logging.info('[*] Try to get %d pages results from page %d' % (page_num, start_page))
        while num < page_num:
            start = self.PAGE_COUNT * (start_page + num - 1)
            html = self.getpage(query, start, proxy)
            logging.info('[+] Parse page %d...' % (start_page + num))
            try:
                page = Gpage(html)
                # ROBUSTNESS: relates may be None for the bare page layout
                relates += page.relates or []
                cites += page.cites
                for k, r in page.results.iteritems():
                    if k not in result_set:
                        result_set[k] = r
                    else:
                        result_set[k].update(r)
            except RobotCheck as e:
                # blocked: rotate identity, back off, retry the same page
                logging.exception(e)
                self.randomProxy()
                self.randomUA()
                st = random.randint(10, 29)
                logging.info('[*] Sleep %d secs' % st)
                time.sleep(st)
                continue
            except ErrGpage:
                logging.error('[!] Page error')
                st = random.randint(5, 9)
                logging.info('[*] Sleep %d secs' % st)
                time.sleep(st)
                continue
            if not page.has_next():
                logging.info('[*] Get last page')
                with open('lastpage.html', 'w') as f:
                    f.write(html)
                break
            num += 1
            # polite pause between pages
            st = random.randint(5, 9)
            logging.info('[*] Sleep %d secs' % st)
            time.sleep(st)

        relates = list(set(relates))
        cites = list(set(cites))

        return dict(relates=relates, cites=cites, results=result_set)


def main():
    """CLI entry point.

    Usage:
        script.py [query [start_page [page_num]]]

    With no arguments, queries are read from ./keywords (one per line).
    Aggregated results are appended to results.txt as JSON.
    """
    g = Google()
    g.load_user_agent()
    g.randomUA()
    g.randomProxy()
    g.PAGE_COUNT = 100  # instance attribute shadows the class default of 10

    if len(sys.argv) < 2:
        with open('./keywords', 'r') as kf:
            keywords = [k.strip() for k in kf.readlines()]
        # BUG FIX: the original overwrote `results` on every iteration and
        # therefore discarded all but the last keyword's results.
        results = {}
        for kw in keywords:
            results[kw] = g.search(kw)
    else:
        kw = sys.argv[1]
        # BUG FIX: the original tested len(sys.argv) == 3 / == 4, so passing
        # both a start page and a page count silently dropped the start page.
        spage = int(sys.argv[2]) if len(sys.argv) >= 3 else 1
        pnum = int(sys.argv[3]) if len(sys.argv) >= 4 else 1
        results = g.search(kw, spage, pnum)

    # BUG FIX: close the output file (the original leaked the handle)
    with open('results.txt', 'a') as fp:
        json.dump(results, fp, ensure_ascii=False)


# Standard script entry guard: run the crawler only when executed directly.
if __name__ == '__main__':
    main()
