function: string (length 18–3.86k) | intent_category: string (length 5–24)
def get_imdb_top_movies(num_movies: int = 5) -> tuple:
    num_movies = int(float(num_movies))
    if num_movies < 1:
        return ()
    base_url = (
        "https://www.imdb.com/search/title?title_type="
        f"feature&sort=num_votes,desc&count={num_movies}"
    )
    source = bs4.BeautifulSoup(requests.get(base_url).content, "html.parser")
    return tuple(
        get_movie_data_from_soup(movie)
        for movie in source.find_all("div", class_="lister-item mode-advanced")
    )
web_programming
def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict:
    new_olid = olid.strip().strip("/")  # Remove leading/trailing whitespace & slashes
    if new_olid.count("/") != 1:
        raise ValueError(f"{olid} is not a valid Open Library olid")
    return requests.get(f"https://openlibrary.org/{new_olid}.json").json()
web_programming
def summarize_book(ol_book_data: dict) -> dict:
    desired_keys = {
        "title": "Title",
        "publish_date": "Publish date",
        "authors": "Authors",
        "number_of_pages": "Number of pages:",
        "first_sentence": "First sentence",
        "isbn_10": "ISBN (10)",
        "isbn_13": "ISBN (13)",
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data["Authors"] = [
        get_openlibrary_data(author["key"])["name"] for author in data["Authors"]
    ]
    data["First sentence"] = data["First sentence"]["value"]
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = ", ".join(value)
    return data
web_programming
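A minimal usage sketch for the two Open Library helpers above, assuming requests is imported at module level; it calls the live openlibrary.org API with the sample ISBN from the default argument.

# Illustrative only: requires network access to openlibrary.org.
book_data = get_openlibrary_data("isbn/0140328726")  # the default sample ISBN
for key, value in summarize_book(book_data).items():
    print(f"{key}: {value}")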
def extract_user_profile(script) -> dict:
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
web_programming
def __init__(self, username):
    self.url = f"https://www.instagram.com/{username}/"
    self.user_data = self.get_json()
web_programming
def get_json(self) -> dict:
    html = requests.get(self.url, headers=headers).text
    scripts = BeautifulSoup(html, "html.parser").find_all("script")
    try:
        return extract_user_profile(scripts[4])
    except (json.decoder.JSONDecodeError, KeyError):
        return extract_user_profile(scripts[3])
web_programming
def __repr__(self) -> str:
    return f"{self.__class__.__name__}('{self.username}')"
web_programming
def __str__(self) -> str:
    return f"{self.fullname} ({self.username}) is {self.biography}"
web_programming
def username(self) -> str:
    return self.user_data["username"]
web_programming
def fullname(self) -> str:
    return self.user_data["full_name"]
web_programming
def biography(self) -> str:
    return self.user_data["biography"]
web_programming
def email(self) -> str:
    return self.user_data["business_email"]
web_programming
def website(self) -> str:
    return self.user_data["external_url"]
web_programming
def number_of_followers(self) -> int:
    return self.user_data["edge_followed_by"]["count"]
web_programming
def number_of_followings(self) -> int:
    return self.user_data["edge_follow"]["count"]
web_programming
def number_of_posts(self) -> int:
    return self.user_data["edge_owner_to_timeline_media"]["count"]
web_programming
def profile_picture_url(self) -> str:
    return self.user_data["profile_pic_url_hd"]
web_programming
def is_verified(self) -> bool:
    return self.user_data["is_verified"]
web_programming
def is_private(self) -> bool:
    return self.user_data["is_private"]
web_programming
def test_instagram_user(username: str = "github") -> None:
    import os

    if os.environ.get("CI"):
        return  # test failing on GitHub Actions
    instagram_user = InstagramUser(username)
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data, dict)
    assert instagram_user.username == username
    if username != "github":
        return
    assert instagram_user.fullname == "GitHub"
    assert instagram_user.biography == "Built for developers."
    assert instagram_user.number_of_posts > 150
    assert instagram_user.number_of_followers > 120000
    assert instagram_user.number_of_followings > 15
    assert instagram_user.email == "support@github.com"
    assert instagram_user.website == "https://github.com/readme"
    assert instagram_user.profile_picture_url.startswith("https://instagram.")
    assert instagram_user.is_verified is True
    assert instagram_user.is_private is False
web_programming
def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}
web_programming
def save_image(image_url: str, image_title: str) -> None:
    image = requests.get(image_url, headers=headers)
    with open(image_title, "wb") as file:
        file.write(image.content)
web_programming
def random_anime_character() -> tuple[str, str, str]:
    soup = BeautifulSoup(requests.get(URL, headers=headers).text, "html.parser")
    title = soup.find("meta", attrs={"property": "og:title"}).attrs["content"]
    image_url = soup.find("meta", attrs={"property": "og:image"}).attrs["content"]
    description = soup.find("p", id="description").get_text()
    _, image_extension = os.path.splitext(os.path.basename(image_url))
    image_title = title.strip().replace(" ", "_")
    image_title = f"{image_title}{image_extension}"
    save_image(image_url, image_title)
    return (title, description, image_title)
web_programming
def fetch_bbc_news(bbc_news_api_key: str) -> None:
    # fetching a list of articles in json format
    bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key).json()
    # each article in the list is a dict
    for i, article in enumerate(bbc_news_page["articles"], 1):
        print(f"{i}.) {article['title']}")
web_programming
def current_weather(q: str = "Chicago", appid: str = APPID) -> dict:
    return requests.get(URL_BASE + "forecast", params=locals()).json()
web_programming
def get_gifs(query: str, api_key: str = giphy_api_key) -> list:
    formatted_query = "+".join(query.split())
    url = f"https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"
    gifs = requests.get(url).json()["data"]
    return [gif["url"] for gif in gifs]
web_programming
def get_citation(base_url: str, params: dict) -> str:
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()
web_programming
def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        raise ValueError(
            f"Request to slack returned an error {response.status_code}, "
            f"the response is:\n{response.text}"
        )
web_programming
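An illustrative call to send_slack_message; the webhook URL below is a hypothetical placeholder, not a real endpoint, and must be replaced with an incoming-webhook URL generated in your own Slack workspace.

# Hypothetical webhook URL, shown only for shape; replace with your own.
webhook_url = "https://hooks.slack.com/services/T00000000/B00000000/XXXXXXXXXXXXXXXX"
send_slack_message("Deployment finished.", webhook_url)  # raises ValueError on non-200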
def horoscope(zodiac_sign: int, day: str) -> str:
    url = (
        "https://www.horoscope.com/us/horoscopes/general/"
        f"horoscope-general-daily-{day}.aspx?sign={zodiac_sign}"
    )
    soup = BeautifulSoup(requests.get(url).content, "html.parser")
    return soup.find("div", class_="main-horoscope").p.text
web_programming
def download_video(url: str) -> bytes:
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content
web_programming
def fetch_last_half_hour() -> str:
    last_half_hour = requests.get(BASE_URL).json()["data"][0]
    return last_half_hour["intensity"]["actual"]
web_programming
def fetch_from_to(start, end) -> list:
    return requests.get(f"{BASE_URL}/{start}/{end}").json()["data"]
web_programming
def fetch_github_info(auth_token: str) -> dict[Any, Any]:
    headers = {
        "Authorization": f"token {auth_token}",
        "Accept": "application/vnd.github.v3+json",
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers).json()
web_programming
def stock_price(symbol: str = "AAPL") -> str:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text
web_programming
def fetch_pharmacy_and_price_list(drug_name: str, zip_code: str) -> list | None:
    try:
        # Has user provided both inputs?
        if not drug_name or not zip_code:
            return None
        request_url = BASE_URL.format(drug_name, zip_code)
        response = get(request_url)
        # Is the response ok?
        response.raise_for_status()
        # Scrape the data using bs4
        soup = BeautifulSoup(response.text, "html.parser")
        # This list will store the name and price.
        pharmacy_price_list = []
        # Fetch all the grids that contain the items.
        grid_list = soup.find_all("div", {"class": "grid-x pharmCard"})
        if grid_list and len(grid_list) > 0:
            for grid in grid_list:
                # Get the pharmacy name.
                pharmacy_name = grid.find("p", {"class": "list-title"}).text
                # Get price of the drug.
                price = grid.find("span", {"class": "price price-large"}).text
                pharmacy_price_list.append(
                    {
                        "pharmacy_name": pharmacy_name,
                        "price": price,
                    }
                )
        return pharmacy_price_list
    except (HTTPError, exceptions.RequestException, ValueError):
        return None
web_programming
def download_images_from_google_query(query: str = "dhaka", max_images: int = 5) -> int:
    max_images = min(max_images, 50)  # Prevent abuse!
    params = {
        "q": query,
        "tbm": "isch",
        "hl": "en",
        "ijn": "0",
    }
    html = requests.get("https://www.google.com/search", params=params, headers=headers)
    soup = BeautifulSoup(html.text, "html.parser")
    matched_images_data = "".join(
        re.findall(r"AF_initDataCallback\(([^<]+)\);", str(soup.select("script")))
    )
    matched_images_data_fix = json.dumps(matched_images_data)
    matched_images_data_json = json.loads(matched_images_data_fix)
    matched_google_image_data = re.findall(
        r"\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",",
        matched_images_data_json,
    )
    if not matched_google_image_data:
        return 0
    removed_matched_google_images_thumbnails = re.sub(
        r"\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]",
        "",
        str(matched_google_image_data),
    )
    matched_google_full_resolution_images = re.findall(
        r"(?:'|,),\[\"(https:|http.*?)\",\d+,\d+\]",
        removed_matched_google_images_thumbnails,
    )
    for index, fixed_full_res_image in enumerate(matched_google_full_resolution_images):
        if index >= max_images:
            return index
        original_size_img_not_fixed = bytes(fixed_full_res_image, "ascii").decode(
            "unicode-escape"
        )
        original_size_img = bytes(original_size_img_not_fixed, "ascii").decode(
            "unicode-escape"
        )
        opener = urllib.request.build_opener()
        opener.addheaders = [
            (
                "User-Agent",
                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
                " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582",
            )
        ]
        urllib.request.install_opener(opener)
        path_name = f"query_{query.replace(' ', '_')}"
        if not os.path.exists(path_name):
            os.makedirs(path_name)
        urllib.request.urlretrieve(
            original_size_img, f"{path_name}/original_size_img_{index}.jpg"
        )
    return index
web_programming
def get_subreddit_data(
    subreddit: str, limit: int = 1, age: str = "new", wanted_data: list | None = None
) -> dict:
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)):
        raise ValueError(f"Invalid search term: {invalid_search_terms}")
    response = requests.get(
        f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}",
        headers={"User-agent": "A random string"},
    )
    if response.status_code == 429:
        raise requests.HTTPError
    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit)}
    data_dict = {}
    for id_ in range(limit):
        data_dict[id_] = {
            item: data["data"]["children"][id_]["data"][item] for item in wanted_data
        }
    return data_dict
web_programming
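An illustrative call to get_subreddit_data, assuming the module-level valid_terms set permits the fields requested here; it requires network access to the live reddit.com JSON endpoint.

# Fetch the title and score of the newest post in r/learnpython.
posts = get_subreddit_data("learnpython", limit=1, age="new", wanted_data=["title", "score"])
print(posts[0]["title"], posts[0]["score"])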
def search_scraper(anime_name: str) -> list:
    # concat the name to form the search url.
    search_url = f"{BASE_URL}/search/{anime_name}"
    response = requests.get(
        search_url, headers={"User-Agent": UserAgent().chrome}
    )  # request the url.
    # Is the response ok?
    response.raise_for_status()
    # parse with soup.
    soup = BeautifulSoup(response.text, "html.parser")
    # get list of anime
    anime_ul = soup.find("ul", {"class": "items"})
    anime_li = anime_ul.children
    # for each anime, insert the name and url into the list.
    anime_list = []
    for anime in anime_li:
        if not isinstance(anime, NavigableString):
            try:
                anime_url, anime_title = (
                    anime.find("a")["href"],
                    anime.find("a")["title"],
                )
                anime_list.append(
                    {
                        "title": anime_title,
                        "url": anime_url,
                    }
                )
            except (NotFoundErr, KeyError):
                pass
    return anime_list
web_programming
def search_anime_episode_list(episode_endpoint: str) -> list:
    request_url = f"{BASE_URL}{episode_endpoint}"
    response = requests.get(url=request_url, headers={"User-Agent": UserAgent().chrome})
    response.raise_for_status()
    soup = BeautifulSoup(response.text, "html.parser")
    # With this id, get the episode list.
    episode_page_ul = soup.find("ul", {"id": "episode_related"})
    episode_page_li = episode_page_ul.children
    episode_list = []
    for episode in episode_page_li:
        try:
            if not isinstance(episode, NavigableString):
                episode_list.append(
                    {
                        "title": episode.find("div", {"class": "name"}).text.replace(
                            " ", ""
                        ),
                        "url": episode.find("a")["href"],
                    }
                )
        except (KeyError, NotFoundErr):
            pass
    return episode_list
web_programming
def get_anime_episode(episode_endpoint: str) -> list:
    episode_page_url = f"{BASE_URL}{episode_endpoint}"
    response = requests.get(
        url=episode_page_url, headers={"User-Agent": UserAgent().chrome}
    )
    response.raise_for_status()
    soup = BeautifulSoup(response.text, "html.parser")
    try:
        episode_url = soup.find("iframe", {"id": "playerframe"})["src"]
        download_url = episode_url.replace("/embed/", "/playlist/") + ".m3u8"
    except (KeyError, NotFoundErr) as e:
        raise e
    return [f"{BASE_URL}{episode_url}", f"{BASE_URL}{download_url}"]
web_programming
def get_all_tweets(screen_name: str) -> None:
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)
    # initialize a list to hold all the tweepy Tweets
    alltweets = []
    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)
    # save most recent tweets
    alltweets.extend(new_tweets)
    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1
    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"getting tweets before {oldest}")
        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(
            screen_name=screen_name, count=200, max_id=oldest
        )
        # save most recent tweets
        alltweets.extend(new_tweets)
        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(f"...{len(alltweets)} tweets downloaded so far")
    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
    # write the csv
    with open(f"new_{screen_name}_tweets.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)
web_programming
def get_apod_data(api_key: str, download: bool = False, path: str = ".") -> dict:
    url = "https://api.nasa.gov/planetary/apod"
    return requests.get(url, params={"api_key": api_key}).json()
web_programming
def save_apod(api_key: str, path: str = ".") -> dict:
    apod_data = get_apod_data(api_key)
    img_url = apod_data["url"]
    img_name = img_url.split("/")[-1]
    response = requests.get(img_url, stream=True)
    with open(f"{path}/{img_name}", "wb+") as img_file:
        shutil.copyfileobj(response.raw, img_file)
    del response
    return apod_data
web_programming
def get_archive_data(query: str) -> dict:
    url = "https://images-api.nasa.gov/search"
    return requests.get(url, params={"q": query}).json()
web_programming
def add(*matrix_s: list[list[int]]) -> list[list[int]]:
    if all(_check_not_integer(m) for m in matrix_s):
        for i in matrix_s[1:]:
            _verify_matrix_sizes(matrix_s[0], i)
        return [[sum(t) for t in zip(*m)] for m in zip(*matrix_s)]
    raise TypeError("Expected a matrix, got int/list instead")
matrix
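A quick worked example of add: zip(*matrix_s) pairs up corresponding rows, and zip(*m) pairs up the entries within those rows, so each output cell is a sum across all input matrices.

# Element-wise sum of two 2x2 matrices.
print(add([[1, 2], [3, 4]], [[10, 20], [30, 40]]))  # [[11, 22], [33, 44]]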
def subtract(matrix_a: list[list[int]], matrix_b: list[list[int]]) -> list[list[int]]:
    if (
        _check_not_integer(matrix_a)
        and _check_not_integer(matrix_b)
        and _verify_matrix_sizes(matrix_a, matrix_b)
    ):
        return [[i - j for i, j in zip(*m)] for m in zip(matrix_a, matrix_b)]
    raise TypeError("Expected a matrix, got int/list instead")
matrix
def scalar_multiply(matrix: list[list[int]], n: int | float) -> list[list[float]]:
    return [[x * n for x in row] for row in matrix]
matrix
def multiply(matrix_a: list[list[int]], matrix_b: list[list[int]]) -> list[list[int]]:
    if _check_not_integer(matrix_a) and _check_not_integer(matrix_b):
        rows, cols = _verify_matrix_sizes(matrix_a, matrix_b)
        if cols[0] != rows[1]:
            raise ValueError(
                f"Cannot multiply matrix of dimensions ({rows[0]},{cols[0]}) "
                f"and ({rows[1]},{cols[1]})"
            )
        return [
            [sum(m * n for m, n in zip(i, j)) for j in zip(*matrix_b)]
            for i in matrix_a
        ]
matrix
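A worked example of the dimension rule multiply enforces: a 2x3 matrix times a 3x2 matrix is defined and yields a 2x2 result, while mismatched inner dimensions raise ValueError.

a = [[1, 2, 3], [4, 5, 6]]
b = [[7, 8], [9, 10], [11, 12]]
print(multiply(a, b))  # [[58, 64], [139, 154]]
# multiply(a, a) would raise ValueError, since 2x3 times 2x3 is undefined.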
def identity(n: int) -> list[list[int]]:
    n = int(n)
    return [[int(row == column) for column in range(n)] for row in range(n)]
matrix
def transpose(
    matrix: list[list[int]], return_map: bool = True
) -> list[list[int]] | map[list[int]]:
    if _check_not_integer(matrix):
        if return_map:
            return map(list, zip(*matrix))
        else:
            return list(map(list, zip(*matrix)))
    raise TypeError("Expected a matrix, got int/list instead")
matrix
def minor(matrix: list[list[int]], row: int, column: int) -> list[list[int]]:
    minor = matrix[:row] + matrix[row + 1 :]
    return [row[:column] + row[column + 1 :] for row in minor]
matrix
def determinant(matrix: list[list[int]]) -> Any:
    if len(matrix) == 1:
        return matrix[0][0]
    return sum(
        x * determinant(minor(matrix, 0, i)) * (-1) ** i
        for i, x in enumerate(matrix[0])
    )
matrix
def inverse(matrix: list[list[int]]) -> list[list[float]] | None:
    # https://stackoverflow.com/questions/20047519/python-doctests-test-for-none
    det = determinant(matrix)
    if det == 0:
        return None
    matrix_minor = [
        [determinant(minor(matrix, i, j)) for j in range(len(matrix))]
        for i in range(len(matrix))
    ]
    cofactors = [
        [x * (-1) ** (row + col) for col, x in enumerate(matrix_minor[row])]
        for row in range(len(matrix))
    ]
    adjugate = list(transpose(cofactors))
    return scalar_multiply(adjugate, 1 / det)
matrix
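determinant (cofactor expansion along the first row) and inverse (the adjugate scaled by 1/det) compose with multiply for a quick sanity check:

m = [[2, 5], [1, 3]]
print(determinant(m))           # 2*3 - 5*1 = 1
print(inverse(m))               # [[3.0, -5.0], [-1.0, 2.0]]
print(multiply(m, inverse(m)))  # [[1.0, 0.0], [0.0, 1.0]], i.e. the identity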
def _check_not_integer(matrix: list[list[int]]) -> bool:
    return not isinstance(matrix, int) and not isinstance(matrix[0], int)
matrix
def _shape(matrix: list[list[int]]) -> tuple[int, int]:
    return len(matrix), len(matrix[0])
matrix
def _verify_matrix_sizes(
    matrix_a: list[list[int]], matrix_b: list[list[int]]
) -> tuple[tuple[int, int], tuple[int, int]]:
    shape = _shape(matrix_a) + _shape(matrix_b)
    if shape[0] != shape[3] or shape[1] != shape[2]:
        raise ValueError(
            f"operands could not be broadcast together with shape "
            f"({shape[0], shape[1]}), ({shape[2], shape[3]})"
        )
    return (shape[0], shape[2]), (shape[1], shape[3])
matrix
def main() -> None:
    matrix_a = [[12, 10], [3, 9]]
    matrix_b = [[3, 4], [7, 4]]
    matrix_c = [[11, 12, 13, 14], [21, 22, 23, 24], [31, 32, 33, 34], [41, 42, 43, 44]]
    matrix_d = [[3, 0, 2], [2, 0, -2], [0, 1, 1]]
    print(f"Add Operation, {add(matrix_a, matrix_b) = } \n")
    print(
        f"Multiply Operation, {multiply(matrix_a, matrix_b) = } \n",
    )
    print(f"Identity: {identity(5)}\n")
    print(f"Minor of {matrix_c} = {minor(matrix_c, 1, 2)} \n")
    print(f"Determinant of {matrix_b} = {determinant(matrix_b)} \n")
    print(f"Inverse of {matrix_d} = {inverse(matrix_d)}\n")
matrix
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1
    visit.add((row, col))
    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)
    visit.remove((row, col))
    return count
matrix
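This depth_first_search counts the distinct paths from the top-left to the bottom-right cell of a 0/1 grid, treating 1s as blocked cells and backtracking via the visit set. For example:

grid = [[0, 0], [0, 0]]
print(depth_first_search(grid, 0, 0, set()))  # 2: right-then-down and down-then-right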
def __init__(self, row: int, column: int, default_value: float = 0) -> None:
    self.row, self.column = row, column
    self.array = [[default_value for c in range(column)] for r in range(row)]
matrix
def single_line(row_vector: list[float]) -> str:
    nonlocal string_format_identifier
    line = "["
    line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
    line += "]"
    return line
matrix
def __repr__(self) -> str:
    return str(self)
matrix
def validate_indicies(self, loc: tuple[int, int]) -> bool:
    if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
        return False
    elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
        return False
    else:
        return True
matrix
def __getitem__(self, loc: tuple[int, int]) -> Any:
    assert self.validate_indicies(loc)
    return self.array[loc[0]][loc[1]]
matrix
def __setitem__(self, loc: tuple[int, int], value: float) -> None:
    assert self.validate_indicies(loc)
    self.array[loc[0]][loc[1]] = value
matrix
def __add__(self, another: Matrix) -> Matrix:
    # Validation
    assert isinstance(another, Matrix)
    assert self.row == another.row and self.column == another.column
    # Add
    result = Matrix(self.row, self.column)
    for r in range(self.row):
        for c in range(self.column):
            result[r, c] = self[r, c] + another[r, c]
    return result
matrix
def __neg__(self) -> Matrix:
    result = Matrix(self.row, self.column)
    for r in range(self.row):
        for c in range(self.column):
            result[r, c] = -self[r, c]
    return result
matrix
def __sub__(self, another: Matrix) -> Matrix:
    return self + (-another)
matrix
def __mul__(self, another: int | float | Matrix) -> Matrix:
    if isinstance(another, (int, float)):  # Scalar multiplication
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] * another
        return result
    elif isinstance(another, Matrix):  # Matrix multiplication
        assert self.column == another.row
        result = Matrix(self.row, another.column)
        for r in range(self.row):
            for c in range(another.column):
                for i in range(self.column):
                    result[r, c] += self[r, i] * another[i, c]
        return result
    else:
        raise TypeError(f"Unsupported type given for another ({type(another)})")
matrix
def transpose(self) -> Matrix:
    result = Matrix(self.column, self.row)
    for r in range(self.row):
        for c in range(self.column):
            result[c, r] = self[r, c]
    return result
matrix
def sherman_morrison(self, u: Matrix, v: Matrix) -> Any:
    # Size validation
    assert isinstance(u, Matrix) and isinstance(v, Matrix)
    assert self.row == self.column == u.row == v.row
    assert u.column == v.column == 1  # u, v should be column vectors
    # Calculate
    v_t = v.transpose()
    numerator_factor = (v_t * self * u)[0, 0] + 1
    if numerator_factor == 0:
        return None  # It's not invertible
    return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
matrix
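For reference, sherman_morrison applies the Sherman-Morrison identity for a rank-one update, where self holds A^{-1} and u, v are column vectors:

(A + uv^T)^{-1} = A^{-1} - \frac{A^{-1} u v^T A^{-1}}{1 + v^T A^{-1} u}

The code's numerator_factor is the scalar 1 + v^T A^{-1} u; when it is zero, A + uv^T is singular, so the method returns None instead of an inverse.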
def test1() -> None:
    # a^(-1)
    ainv = Matrix(3, 3, 0)
    for i in range(3):
        ainv[i, i] = 1
    print(f"a^(-1) is {ainv}")
    # u, v
    u = Matrix(3, 1, 0)
    u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
    v = Matrix(3, 1, 0)
    v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
    print(f"u is {u}")
    print(f"v is {v}")
    print(f"uv^T is {u * v.transpose()}")
    # Sherman Morrison
    print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")
matrix
def test2() -> None:
    import doctest

    doctest.testmod()
matrix
def update_area_of_max_square(row: int, col: int) -> int:
    # BASE CASE
    if row >= rows or col >= cols:
        return 0
    right = update_area_of_max_square(row, col + 1)
    diagonal = update_area_of_max_square(row + 1, col + 1)
    down = update_area_of_max_square(row + 1, col)
    if mat[row][col]:
        sub_problem_sol = 1 + min([right, diagonal, down])
        largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
        return sub_problem_sol
    else:
        return 0
matrix
def update_area_of_max_square_using_dp_array(
    row: int, col: int, dp_array: list[list[int]]
) -> int:
    # BASE CASE
    if row >= rows or col >= cols:
        return 0
    # Return the memoized sub-problem solution if it was already computed
    if dp_array[row][col] != -1:
        return dp_array[row][col]
    right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
    diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
    down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)
    if mat[row][col]:
        sub_problem_sol = 1 + min([right, diagonal, down])
        largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
        dp_array[row][col] = sub_problem_sol
        return sub_problem_sol
    else:
        return 0
matrix
def largest_square_area_in_matrix_bottom_up(
    rows: int, cols: int, mat: list[list[int]]
) -> int:
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]
            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0
    return largest_square_area
matrix
def largest_square_area_in_matrix_bottom_up_space_optimization(
    rows: int, cols: int, mat: list[list[int]]
) -> int:
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]
            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        next_row = current_row.copy()  # snapshot this row; plain assignment would alias
    return largest_square_area
matrix
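A small input exercising the bottom-up variant above (the recursive variants agree on it); note that despite the names, these functions return the side length of the largest all-ones square, since the DP cell holds 1 + min(right, diagonal, bottom).

sample = [
    [1, 1, 0],
    [1, 1, 0],
    [0, 0, 1],
]
print(largest_square_area_in_matrix_bottom_up(3, 3, sample))  # 2 (the 2x2 top-left block)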
def make_matrix(row_size: int = 4) -> list[list[int]]:
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]
matrix
def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(transpose(matrix))  # OR.. transpose(reverse_column(matrix))
matrix
def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))
matrix
def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_column(transpose(matrix))  # OR.. transpose(reverse_row(matrix))
matrix
def transpose(matrix: list[list[int]]) -> list[list[int]]:
    matrix[:] = [list(x) for x in zip(*matrix)]
    return matrix
matrix
def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    matrix[:] = matrix[::-1]
    return matrix
matrix
def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    matrix[:] = [x[::-1] for x in matrix]
    return matrix
matrix
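The rotation helpers compose transpose, reverse_row, and reverse_column, all of which mutate their argument in place via matrix[:] assignment. Under this convention, rotate_90 turns the matrix 90 degrees counter-clockwise:

m = make_matrix(2)   # [[1, 2], [3, 4]]
print(rotate_90(m))  # [[2, 4], [1, 3]]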
def print_matrix(matrix: list[list[int]]) -> None:
    for i in matrix:
        print(*i)
matrix
def check_matrix(matrix: list[list[int]]) -> bool:
    # must be
    matrix = [list(row) for row in matrix]
    if matrix and isinstance(matrix, list):
        if isinstance(matrix[0], list):
            prev_len = 0
            for row in matrix:
                if prev_len == 0:
                    prev_len = len(row)
                    result = True
                else:
                    result = prev_len == len(row)
        else:
            result = True
    else:
        result = False
    return result
matrix
def spiral_print_clockwise(a: list[list[int]]) -> None:
    if check_matrix(a) and len(a) > 0:
        a = [list(row) for row in a]
        mat_row = len(a)
        if isinstance(a[0], list):
            mat_col = len(a[0])
        else:
            for dat in a:
                print(dat)
            return
        # horizontal printing increasing
        for i in range(0, mat_col):
            print(a[0][i])
        # vertical printing down
        for i in range(1, mat_row):
            print(a[i][mat_col - 1])
        # horizontal printing decreasing
        if mat_row > 1:
            for i in range(mat_col - 2, -1, -1):
                print(a[mat_row - 1][i])
        # vertical printing up
        for i in range(mat_row - 2, 0, -1):
            print(a[i][0])
        remain_mat = [row[1 : mat_col - 1] for row in a[1 : mat_row - 1]]
        if len(remain_mat) > 0:
            spiral_print_clockwise(remain_mat)
        else:
            return
    else:
        print("Not a valid matrix")
        return
matrix
def spiral_traversal(matrix: list[list]) -> list[int]:
    if matrix:
        return list(matrix.pop(0)) + spiral_traversal(list(zip(*matrix))[::-1])
    else:
        return []
matrix
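spiral_traversal peels off the first row, then rotates the remaining rows (zip-transpose followed by a reversal) so the next edge of the spiral becomes the new first row:

print(spiral_traversal([[1, 2, 3], [4, 5, 6], [7, 8, 9]]))
# [1, 2, 3, 6, 9, 8, 7, 4, 5]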
def __init__(self, row: int, col: int, graph: list[list[bool]]) -> None:
    self.ROW = row
    self.COL = col
    self.graph = graph
matrix
def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
    return (
        0 <= i < self.ROW
        and 0 <= j < self.COL
        and not visited[i][j]
        and self.graph[i][j]
    )
matrix
def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
    # Checking all 8 elements surrounding nth element
    row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
    col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
    visited[i][j] = True  # Make those cells visited
    for k in range(8):
        if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
            self.diffs(i + row_nbr[k], j + col_nbr[k], visited)
matrix
def is_safe(row: int, col: int, rows: int, cols: int) -> bool:
    return 0 <= row < rows and 0 <= col < cols
matrix
def depth_first_search(row: int, col: int, seen: set, mat: list[list[int]]) -> int:
    rows = len(mat)
    cols = len(mat[0])
    if is_safe(row, col, rows, cols) and (row, col) not in seen and mat[row][col] == 1:
        seen.add((row, col))
        return (
            1
            + depth_first_search(row + 1, col, seen, mat)
            + depth_first_search(row - 1, col, seen, mat)
            + depth_first_search(row, col + 1, seen, mat)
            + depth_first_search(row, col - 1, seen, mat)
        )
    else:
        return 0
matrix
def find_max_area(mat: list[list[int]]) -> int:
    seen: set = set()
    max_area = 0
    for row, line in enumerate(mat):
        for col, item in enumerate(line):
            if item == 1 and (row, col) not in seen:
                # Maximizing the area
                max_area = max(max_area, depth_first_search(row, col, seen, mat))
    return max_area
matrix
def binary_search(array: list, lower_bound: int, upper_bound: int, value: int) -> int:
    r = int((lower_bound + upper_bound) // 2)
    if array[r] == value:
        return r
    if lower_bound >= upper_bound:
        return -1
    if array[r] < value:
        return binary_search(array, r + 1, upper_bound, value)
    else:
        return binary_search(array, lower_bound, r - 1, value)
matrix
def mat_bin_search(value: int, matrix: list) -> list:
    index = 0
    if matrix[index][0] == value:
        return [index, 0]
    while index < len(matrix) and matrix[index][0] < value:
        r = binary_search(matrix[index], 0, len(matrix[index]) - 1, value)
        if r != -1:
            return [index, r]
        index += 1
    return [-1, -1]
matrix
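mat_bin_search walks the rows whose leading element is below the target and runs the recursive binary_search above on each candidate row, returning [row, column] on a hit and [-1, -1] otherwise:

matrix = [[1, 4, 7, 11], [2, 5, 8, 12], [3, 6, 9, 16]]
print(mat_bin_search(8, matrix))   # [1, 2]
print(mat_bin_search(10, matrix))  # [-1, -1]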
def __init__(self, rows: list[list[int]]):
    error = TypeError(
        "Matrices must be formed from a list of zero or more lists containing at "
        "least one and the same number of values, each of which must be of type "
        "int or float."
    )
    if len(rows) != 0:
        cols = len(rows[0])
        if cols == 0:
            raise error
        for row in rows:
            if len(row) != cols:
                raise error
            for value in row:
                if not isinstance(value, (int, float)):
                    raise error
        self.rows = rows
    else:
        self.rows = []
matrix
def columns(self) -> list[list[int]]:
    return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]
matrix
def num_rows(self) -> int:
    return len(self.rows)
matrix
def num_columns(self) -> int:
    return len(self.rows[0])
matrix
def order(self) -> tuple[int, int]:
    return (self.num_rows, self.num_columns)
matrix