from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
import requests
import time
import pickle
import pandas
from tqdm import tqdm

class reptile_sensortower():
    """Scraper for app listings on app.sensortower.com.

    Workflow: ``get_urls()`` harvests app-detail links from a search-results
    page (JS-rendered, so Selenium headless Chrome is used), then
    ``get_data(url)`` scrapes each detail page and appends one dict per app
    to ``self._data``. ``writter()`` persists the collected rows.
    """

    def __init__(self) -> None:
        # Base host used to absolutize the relative hrefs found in results.
        self.server = 'https://app.sensortower.com'
        # Search page: Android "photography" apps whose name matches "AI".
        self.target_url = 'https://app.sensortower.com/search?locale=zh-CN&os=android&device=android&entity_type=app&entities_per_page=250&current_page=1&term=AI&search_by=name&category=photography'
        self._urls = []   # absolute app-detail URLs filled by get_urls()
        self._data = []   # one dict per scraped app, appended by get_data()

    def get_html(self, url):
        """Open *url* in a headless Chrome session and return the live driver.

        NOTE(review): the caller owns the returned driver and must call
        ``quit()`` on it — see get_urls()/get_data().
        """
        # Fix: raw string — the original non-raw path only worked because
        # '\G' and '\c' happen not to be escape sequences (DeprecationWarning).
        service = Service(executable_path=r'D:\GeckoDriver\chromedriver-win64\chromedriver.exe')
        options = webdriver.ChromeOptions()
        # Reuse the local Chrome profile so the logged-in session is available.
        options.add_argument(r'user-data-dir=C:\Users\sonow\AppData\Local\Google\Chrome\User Data')
        options.add_argument('--headless')  # hide the browser window
        browser = webdriver.Chrome(options=options, service=service)
        browser.get(url)
        # Fix: removed dead `browser.execute_script("return ...outerHTML")`
        # whose return value was discarded.
        return browser

    def get_urls(self):
        """Populate self._urls with absolute app-detail links from the search page."""
        browser = self.get_html(url=self.target_url)
        try:
            html_table_tbody = browser.find_element(
                'xpath', "//tbody[@class='search-results-table-body']"
            ).get_attribute('outerHTML')
        finally:
            # Fix: the original never quit the driver, leaking one Chrome
            # process per call.
            browser.quit()
        soup = BeautifulSoup(html_table_tbody, 'lxml')
        self._urls = [self.server + a.get('href')
                      for a in soup.find_all('a', class_='entity-name')
                      if a.get('href')]

    @staticmethod
    def _labeled_value(soup, label):
        """Return the text of the first <span> inside the <div> following the
        <h4> whose text equals *label*, or '' when any part is missing.

        Fix: the original chained .find()/.find_next_sibling() without None
        checks and raised AttributeError whenever a label was absent.
        """
        h4 = soup.find('h4', string=label)
        if h4 is None:
            return ''
        sibling = h4.find_next_sibling('div')
        span = sibling.find('span') if sibling is not None else None
        return span.get_text(strip=True) if span is not None else ''

    def get_data(self, url):
        """Scrape one app-detail page and append a record dict to self._data.

        Skips the page (after a 5 s back-off) when a plain HTTP probe fails.
        """
        headers = {
            'Cookie':'locale=zh-CN; device_id=683cbc6e-bb43-43df-8949-02dbad955832; sensor_tower_session=bcf6e948a7acaecf07804a0bd88a60bd; osano_consentmanager_uuid=fb638b1e-bc68-42f4-afac-706d02d5951f; osano_consentmanager=S1jcOL8E4fhmyIhISdxyteZNb4OQaioEqOz9KUvhbUdgXxti5Q2VSarCs3e2vajSpp1lJHoH2ukq78ScxI1nh9VcimXGYhUIY6vk7FDG7by2NFf-Nx0bku7RvJGRxqrifpGyn3Sv5tYT0UKdcQY7iT6Wx4Ma8N_l7G7nWnlEFh63SmcN6zybx5RocsLOMUEDRjMKbVAYMumVRXgSCqI-gJQBFQXBQS3kVSdmyNJVpcONrmU6SGuz4v4C0eaTXcETX12VneP7yP4x8Ia2k-ErZ5n_5lYQXG1kbhG8FQ==; _ga=GA1.1.2080279838.1704682469; _ga_FDNER2EVFL=GS1.1.1704780243.2.1.1704782039.0.0.0; amplitude_id_6edb64137a31fa337b6f553dbccf2d8bsensortower.com=eyJkZXZpY2VJZCI6IjY0ZWFiM2M0LWE4MGYtNGRhYS1iYTY0LTlhODc4NjZmNjRiY1IiLCJ1c2VySWQiOiJsZWl4aWFucGluZ0BhZGVzay5jb20iLCJvcHRPdXQiOmZhbHNlLCJzZXNzaW9uSWQiOjE3MDQ3Nzk3Mjc4MjMsImxhc3RFdmVudFRpbWUiOjE3MDQ3ODI1MjE0OTQsImV2ZW50SWQiOjk0LCJpZGVudGlmeUlkIjoxLCJzZXF1ZW5jZU51bWJlciI6OTV9',
            'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
        }
        # Cheap reachability probe before spinning up a full browser session.
        if not requests.get(url, headers=headers).ok:
            time.sleep(5)
            return
        browser = self.get_html(url=url)
        try:
            time.sleep(5)  # let the JS-rendered page settle before reading it
            html = browser.find_element('xpath', '//div[@id="mainContent"]').get_attribute('outerHTML')
        finally:
            browser.quit()  # fix: original leaked the driver on every call
        soup = BeautifulSoup(html, 'lxml')
        record = {}
        # App name: None-safe (original raised AttributeError on a miss).
        name_tag = soup.find('h2', class_='MuiTypography-root MuiTypography-h2 AppOverviewSubappDetailsHeader-module__appName--hKwll css-1fjsqn2')
        app_name = name_tag.get_text(strip=True) if name_tag is not None else ''
        # Developer website: the <h4> label text is Chinese on the zh-CN page.
        div_developer = soup.find('h4', string='开发者网站')
        website = ''
        if div_developer is not None:
            sibling = div_developer.find_next_sibling('div')
            link = sibling.find('a') if sibling is not None else None
            if link is not None:
                website = link.get('href') or ''
        downloads = self._labeled_value(soup, '下载量')  # "downloads"
        income = self._labeled_value(soup, '收入')       # "revenue"
        # Description: the original indexed [1] unguarded (IndexError when
        # fewer than two matches), making its `else ''` branch unreachable.
        descriptions = soup.find_all('div', class_='MuiTypography-root MuiTypography-body1 AppOverviewSubappAboutDescription-module__value--Pij7B css-19d5dex')
        div_description = descriptions[1] if len(descriptions) > 1 else None
        description = div_description.get_text(strip=True) if div_description is not None else ''
        description = ''.join(line for line in description.splitlines())
        record['app_name'] = app_name
        record['website'] = website
        record['downloads'] = downloads
        record['income'] = income
        record['description'] = description
        self._data.append(record)

    @staticmethod
    def writter(data, filename='data', mode='csv'):
        """Persist *data* (list of dicts) to '<mode>_<filename>.csv' or '.pkl'.

        NOTE(review): name keeps the original misspelling ('writter') because
        callers use it; unknown *mode* values are silently ignored, matching
        the original behavior.
        """
        if mode == 'csv':
            df = pandas.DataFrame(data)
            df.to_csv(mode+'_'+filename+'.csv', index=False)
        elif mode == 'binary':
            with open(mode+'_'+filename+'.pkl', mode='wb') as f:
                pickle.dump(data, f)

if __name__ == '__main__':
    # Harvest the app-detail URLs once, then scrape each page in turn,
    # finally dumping everything that was collected to CSV.
    scraper = reptile_sensortower()
    scraper.get_urls()
    for app_url in scraper._urls:
        print(app_url)
        scraper.get_data(app_url)
        print(scraper._data)
    scraper.writter(data=scraper._data)
    

        
