import requests as rq
from bs4 import BeautifulSoup as bs

# generate url for each page
def get_url(keyword = "", page = 0):
    """Build a Baidu search URL for `keyword` at the given 0-based `page`.

    Each page holds 50 results (`rn=50`); `pn` is the result *offset*,
    so page 0 starts at offset 0, page 1 at offset 50, and so on.
    """
    base = "https://baidu.com/s?wd=" + keyword

    # BUG FIX: the original assigned `url = base + "&rn=50"` and then
    # overwrote it with `url = base + "&pn=..."`, silently dropping the
    # 50-results-per-page parameter. It also used (page + 1) * 50,
    # which skipped the first 50 results. Keep both parameters and
    # start at offset 0.
    url = base + "&rn=50" + "&pn=" + str(page * 50)

    return url

def extract_info(html):
    """Parse one Baidu result page and return the result link texts.

    Parameters:
        html: raw HTML (bytes or str) of a search-results page.

    Returns:
        list[str]: text of the first <a> tag inside each
        <div class="result"> container (one per organic result).
    """
    # convert to soup object
    soup = bs(html, 'lxml')

    # will store the result here for every page
    data = []

    # each result item lives in a <div class="result"> container
    # (find_all is the modern name for the legacy findAll alias)
    results = soup.find_all("div", {"class": "result"})

    for r in results:
        link = r.find('a')
        # BUG FIX: a malformed result div with no <a> tag used to crash
        # with AttributeError on `.text`; skip such blocks instead.
        if link is not None:
            data.append(link.text)
    return data


def save_data(filename, data):
    """Append each string in `data` as one line to `<filename>.txt`.

    The file is opened in append mode (`a`) so results from repeated
    searches accumulate rather than overwrite each other.
    """
    with open(filename + ".txt", 'a', encoding='utf-8') as f:
        # one result per line, batched into a single writelines call;
        # the `with` context manager closes the file automatically, so
        # the original's explicit f.close() was redundant.
        f.writelines(one_data + "\n" for one_data in data)
        
# search until the last page
def search(keyword = '', lastPage = 0):
    """Scrape `lastPage` pages of Baidu results for `keyword`.

    Fetches each page, extracts the result titles, and appends them all
    to `<keyword>.txt` via save_data(). Network errors and timeouts
    propagate to the caller as requests exceptions.
    """

    # accumulated results across all pages
    data = []

    # search for each page
    for i in range(lastPage):
        url = get_url(keyword, i)

        # fetch the html for this page; a timeout keeps a stalled
        # connection from hanging the whole run indefinitely
        html = rq.get(url, timeout=10).content

        # extend in place instead of rebuilding the list each page
        data += extract_info(html)

        print(" page " + str(i+1) + " for [" + keyword +  "] is complete ✅")

    # persist everything we collected
    save_data(keyword, data)




# run the example searches (10 pages ≈ 500 results per keyword) only
# when executed as a script, so importing this module stays side-effect
# free (the original comment also wrongly claimed "3 pages")
if __name__ == "__main__":
    search('maral adal', 10)
    search('python', 10)
    search('mirzatsoft', 10)