'''
A web crawler for searching the web for user information.
'''

import asyncio  # NOTE(review): unused in this file; kept in case other code depends on it
import re
from typing import Iterator

from requests_html import HTMLSession
# Regex patterns for scraping contact data out of fetched pages.
# Raw strings avoid Python's invalid-escape-sequence warnings; inside a
# character class '.' needs no escaping, and '@' / '/' never do.
email = r'[\w.\-]+@[\w.\-]+\.\w+'
# Bug fixed: the original 'http\w:' required a word character after
# "http", so plain 'http://...' URLs never matched; 'https?' matches
# both http and https schemes.
url = r'https?://[\w+.]+[/\w+.-]+'
session = HTMLSession()

def google_search(query: str) -> Iterator[str]:
    """Yield the result URLs of a Google search for *query*.

    Bug fixed: the function is a generator (it yields), so the return
    annotation is ``Iterator[str]``, not ``str``.

    NOTE(review): the ``div[@class="r"]`` selector targets Google's
    legacy results markup and may match nothing on current pages —
    confirm against a live response.
    """
    response = session.get(f'http://google.com/search?q={query}')
    element_links = response.html.xpath('//div[@class="r"]/a[@href]')
    for link in element_links:
        yield link.attrs['href']

def search_more_links(list_of_links):
    """Fetch every link and return the input links plus all absolute
    links discovered on those pages.

    Bugs fixed in the original:
      * ``return`` sat inside the loop, so only the first link was
        ever fetched;
      * the set of discovered links was ``append``-ed as one element
        instead of adding each URL individually;
      * the input list was mutated while being iterated.

    The input list is no longer modified; a new list is returned.
    """
    discovered = list(list_of_links)
    for link in list_of_links:
        response = session.get(link)
        # absolute_links is a set of fully-qualified URLs on the page.
        discovered.extend(response.html.absolute_links)
    return discovered


def get_page_source(list_of_links):
    """Return a list with the raw HTML source of each linked page.

    Bug fixed: the accumulator was re-created and returned inside the
    loop, so only the first page's HTML was ever collected; an empty
    input returned ``None``.  Now every link is fetched and an empty
    input yields an empty list.
    """
    list_of_html = []
    for link in list_of_links:
        response = session.get(link)
        list_of_html.append(response.html.html)
    return list_of_html


def get_page_text(list_of_links):
    """Return a list with the extracted plain text of each linked page.

    Bug fixed: the accumulator was re-created and returned inside the
    loop, so only the first page's text was ever collected; an empty
    input returned ``None``.  Now every link is fetched and an empty
    input yields an empty list.
    """
    list_of_text_in_html = []
    for link in list_of_links:
        response = session.get(link)
        list_of_text_in_html.append(response.html.text)
    return list_of_text_in_html




if __name__ == '__main__':
    from sys import argv

    # Robustness fix: running without a query previously crashed with
    # an IndexError on argv[1]; exit with a usage message instead.
    if len(argv) < 2:
        raise SystemExit(f'usage: {argv[0]} <search query>')

    links = list(google_search(argv[1]))
    # Optional second-level crawl, disabled by default:
    # links = search_more_links(links)
    text = get_page_text(links)
    print(text)
    print(links)
