Enable ruff S113 rule #11375

Merged (2 commits) on Apr 21, 2024
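
The change is mechanical: every `requests` call gains an explicit `timeout` argument so a stalled server can no longer hang a script indefinitely, which is what ruff's S113 rule ("probable use of requests call without timeout") flags. A minimal sketch of the pattern applied throughout this PR (the function name and URL below are illustrative, not taken from the diff; the 10-second value matches the one used in every file):

import requests

def fetch_json(url: str = "https://example.com/data.json") -> dict:
    # Without timeout=, requests can wait forever on an unresponsive host,
    # which is exactly the situation S113 warns about.
    # With timeout=10, a dead or slow server raises requests.exceptions.Timeout instead.
    response = requests.get(url, timeout=10)
    response.raise_for_status()  # surface HTTP 4xx/5xx as exceptions
    return response.json()

Dropping "S113" from the lint.ignore list in the pyproject.toml diff below is what turns the rule on for the whole repository; the remaining hunks fix the existing violations.
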
3 changes: 2 additions & 1 deletion machine_learning/linear_regression.py
@@ -19,7 +19,8 @@ def collect_dataset():
     """
     response = requests.get(
         "https://raw.githubusercontent.com/yashLadha/The_Math_of_Intelligence/"
-        "master/Week1/ADRvsRating.csv"
+        "master/Week1/ADRvsRating.csv",
+        timeout=10,
     )
     lines = response.text.splitlines()
     data = []

1 change: 0 additions & 1 deletion pyproject.toml
@@ -14,7 +14,6 @@ lint.ignore = [ # `ruff rule S101` for a description of that rule
   "RUF002", # Docstring contains ambiguous {}. Did you mean {}?
   "RUF003", # Comment contains ambiguous {}. Did you mean {}?
   "S101", # Use of `assert` detected -- DO NOT FIX
-  "S113", # Probable use of requests call without timeout -- FIX ME
   "S311", # Standard pseudo-random generators are not suitable for cryptographic purposes -- FIX ME
   "SLF001", # Private member accessed: `_Iterator` -- FIX ME
   "UP038", # Use `X | Y` in `{}` call instead of `(X, Y)` -- DO NOT FIX

2 changes: 1 addition & 1 deletion scripts/validate_solutions.py
@@ -57,7 +57,7 @@ def added_solution_file_path() -> list[pathlib.Path]:
         "Accept": "application/vnd.github.v3+json",
         "Authorization": "token " + os.environ["GITHUB_TOKEN"],
     }
-    files = requests.get(get_files_url(), headers=headers).json()
+    files = requests.get(get_files_url(), headers=headers, timeout=10).json()
     for file in files:
         filepath = pathlib.Path.cwd().joinpath(file["filename"])
         if (

4 changes: 2 additions & 2 deletions web_programming/co2_emission.py
@@ -11,13 +11,13 @@

 # Emission in the last half hour
 def fetch_last_half_hour() -> str:
-    last_half_hour = requests.get(BASE_URL).json()["data"][0]
+    last_half_hour = requests.get(BASE_URL, timeout=10).json()["data"][0]
     return last_half_hour["intensity"]["actual"]


 # Emissions in a specific date range
 def fetch_from_to(start, end) -> list:
-    return requests.get(f"{BASE_URL}/{start}/{end}").json()["data"]
+    return requests.get(f"{BASE_URL}/{start}/{end}", timeout=10).json()["data"]


 if __name__ == "__main__":

4 changes: 3 additions & 1 deletion web_programming/covid_stats_via_xpath.py
@@ -18,7 +18,9 @@ class CovidData(NamedTuple):

 def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> CovidData:
     xpath_str = '//div[@class = "maincounter-number"]/span/text()'
-    return CovidData(*html.fromstring(requests.get(url).content).xpath(xpath_str))
+    return CovidData(
+        *html.fromstring(requests.get(url, timeout=10).content).xpath(xpath_str)
+    )


 fmt = """Total COVID-19 cases in the world: {}

2 changes: 1 addition & 1 deletion web_programming/crawl_google_results.py
@@ -8,7 +8,7 @@
 if __name__ == "__main__":
     print("Googling.....")
     url = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
-    res = requests.get(url, headers={"UserAgent": UserAgent().random})
+    res = requests.get(url, headers={"UserAgent": UserAgent().random}, timeout=10)
     # res.raise_for_status()
     with open("project1a.html", "wb") as out_file: # only for knowing the class
         for data in res.iter_content(10000):

4 changes: 3 additions & 1 deletion web_programming/crawl_google_scholar_citation.py
@@ -11,7 +11,9 @@ def get_citation(base_url: str, params: dict) -> str:
     """
     Return the citation number.
     """
-    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
+    soup = BeautifulSoup(
+        requests.get(base_url, params=params, timeout=10).content, "html.parser"
+    )
     div = soup.find("div", attrs={"class": "gs_ri"})
     anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
     return anchors[2].get_text()

2 changes: 1 addition & 1 deletion web_programming/currency_converter.py
@@ -176,7 +176,7 @@ def convert_currency(
     params = locals()
     # from is a reserved keyword
     params["from"] = params.pop("from_")
-    res = requests.get(URL_BASE, params=params).json()
+    res = requests.get(URL_BASE, params=params, timeout=10).json()
     return str(res["amount"]) if res["error"] == 0 else res["error_message"]


4 changes: 3 additions & 1 deletion web_programming/current_stock_price.py
@@ -4,7 +4,9 @@

 def stock_price(symbol: str = "AAPL") -> str:
     url = f"https://finance.yahoo.com/quote/{symbol}?p={symbol}"
-    yahoo_finance_source = requests.get(url, headers={"USER-AGENT": "Mozilla/5.0"}).text
+    yahoo_finance_source = requests.get(
+        url, headers={"USER-AGENT": "Mozilla/5.0"}, timeout=10
+    ).text
     soup = BeautifulSoup(yahoo_finance_source, "html.parser")
     specific_fin_streamer_tag = soup.find("fin-streamer", {"data-test": "qsp-price"})

4 changes: 2 additions & 2 deletions web_programming/current_weather.py
@@ -20,13 +20,13 @@ def current_weather(location: str) -> list[dict]:
     if OPENWEATHERMAP_API_KEY:
         params_openweathermap = {"q": location, "appid": OPENWEATHERMAP_API_KEY}
         response_openweathermap = requests.get(
-            OPENWEATHERMAP_URL_BASE, params=params_openweathermap
+            OPENWEATHERMAP_URL_BASE, params=params_openweathermap, timeout=10
         )
         weather_data.append({"OpenWeatherMap": response_openweathermap.json()})
     if WEATHERSTACK_API_KEY:
         params_weatherstack = {"query": location, "access_key": WEATHERSTACK_API_KEY}
         response_weatherstack = requests.get(
-            WEATHERSTACK_URL_BASE, params=params_weatherstack
+            WEATHERSTACK_URL_BASE, params=params_weatherstack, timeout=10
         )
         weather_data.append({"Weatherstack": response_weatherstack.json()})
     if not weather_data:

2 changes: 1 addition & 1 deletion web_programming/daily_horoscope.py
@@ -7,7 +7,7 @@ def horoscope(zodiac_sign: int, day: str) -> str:
         "https://www.horoscope.com/us/horoscopes/general/"
         f"horoscope-general-daily-{day}.aspx?sign={zodiac_sign}"
     )
-    soup = BeautifulSoup(requests.get(url).content, "html.parser")
+    soup = BeautifulSoup(requests.get(url, timeout=10).content, "html.parser")
     return soup.find("div", class_="main-horoscope").p.text


4 changes: 3 additions & 1 deletion web_programming/download_images_from_google_query.py
@@ -39,7 +39,9 @@ def download_images_from_google_query(query: str = "dhaka", max_images: int = 5)
         "ijn": "0",
     }

-    html = requests.get("https://www.google.com/search", params=params, headers=headers)
+    html = requests.get(
+        "https://www.google.com/search", params=params, headers=headers, timeout=10
+    )
     soup = BeautifulSoup(html.text, "html.parser")
     matched_images_data = "".join(
         re.findall(r"AF_initDataCallback\(([^<]+)\);", str(soup.select("script")))

4 changes: 2 additions & 2 deletions web_programming/emails_from_url.py
@@ -77,7 +77,7 @@ def emails_from_url(url: str = "https://github.com") -> list[str]:

     try:
         # Open URL
-        r = requests.get(url)
+        r = requests.get(url, timeout=10)

         # pass the raw HTML to the parser to get links
         parser.feed(r.text)
@@ -88,7 +88,7 @@ def emails_from_url(url: str = "https://github.com") -> list[str]:
             # open URL.
             # read = requests.get(link)
             try:
-                read = requests.get(link)
+                read = requests.get(link, timeout=10)
                 # Get the valid email.
                 emails = re.findall("[a-zA-Z0-9]+@" + domain, read.text)
                 # If not in list then append it.

8 changes: 5 additions & 3 deletions web_programming/fetch_anime_and_play.py
@@ -28,7 +28,7 @@ def search_scraper(anime_name: str) -> list:
     search_url = f"{BASE_URL}/search/{anime_name}"

     response = requests.get(
-        search_url, headers={"UserAgent": UserAgent().chrome}
+        search_url, headers={"UserAgent": UserAgent().chrome}, timeout=10
     ) # request the url.

     # Is the response ok?
@@ -82,7 +82,9 @@ def search_anime_episode_list(episode_endpoint: str) -> list:

     request_url = f"{BASE_URL}{episode_endpoint}"

-    response = requests.get(url=request_url, headers={"UserAgent": UserAgent().chrome})
+    response = requests.get(
+        url=request_url, headers={"UserAgent": UserAgent().chrome}, timeout=10
+    )
     response.raise_for_status()

     soup = BeautifulSoup(response.text, "html.parser")
@@ -132,7 +134,7 @@ def get_anime_episode(episode_endpoint: str) -> list:
     episode_page_url = f"{BASE_URL}{episode_endpoint}"

     response = requests.get(
-        url=episode_page_url, headers={"User-Agent": UserAgent().chrome}
+        url=episode_page_url, headers={"User-Agent": UserAgent().chrome}, timeout=10
     )
     response.raise_for_status()

2 changes: 1 addition & 1 deletion web_programming/fetch_bbc_news.py
@@ -7,7 +7,7 @@

 def fetch_bbc_news(bbc_news_api_key: str) -> None:
     # fetching a list of articles in json format
-    bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key).json()
+    bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key, timeout=10).json()
     # each article in the list is a dict
     for i, article in enumerate(bbc_news_page["articles"], 1):
         print(f"{i}.) {article['title']}")

2 changes: 1 addition & 1 deletion web_programming/fetch_github_info.py
@@ -42,7 +42,7 @@ def fetch_github_info(auth_token: str) -> dict[Any, Any]:
         "Authorization": f"token {auth_token}",
         "Accept": "application/vnd.github.v3+json",
     }
-    return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers).json()
+    return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers, timeout=10).json()


 if __name__ == "__main__": # pragma: no cover

4 changes: 3 additions & 1 deletion web_programming/fetch_jobs.py
@@ -13,7 +13,9 @@


 def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
-    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
+    soup = BeautifulSoup(
+        requests.get(url + location, timeout=10).content, "html.parser"
+    )
     # This attribute finds out all the specifics listed in a job
     for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
         job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()

4 changes: 2 additions & 2 deletions web_programming/fetch_quotes.py
@@ -14,11 +14,11 @@


 def quote_of_the_day() -> list:
-    return requests.get(API_ENDPOINT_URL + "/today").json()
+    return requests.get(API_ENDPOINT_URL + "/today", timeout=10).json()


 def random_quotes() -> list:
-    return requests.get(API_ENDPOINT_URL + "/random").json()
+    return requests.get(API_ENDPOINT_URL + "/random", timeout=10).json()


 if __name__ == "__main__":

2 changes: 1 addition & 1 deletion web_programming/fetch_well_rx_price.py
@@ -42,7 +42,7 @@ def fetch_pharmacy_and_price_list(drug_name: str, zip_code: str) -> list | None:
             return None

         request_url = BASE_URL.format(drug_name, zip_code)
-        response = get(request_url)
+        response = get(request_url, timeout=10)

         # Is the response ok?
         response.raise_for_status()

4 changes: 3 additions & 1 deletion web_programming/get_amazon_product_data.py
@@ -24,7 +24,9 @@ def get_amazon_product_data(product: str = "laptop") -> DataFrame:
         ),
         "Accept-Language": "en-US, en;q=0.5",
     }
-    soup = BeautifulSoup(requests.get(url, headers=header).text, features="lxml")
+    soup = BeautifulSoup(
+        requests.get(url, headers=header, timeout=10).text, features="lxml"
+    )
     # Initialize a Pandas dataframe with the column titles
     data_frame = DataFrame(
         columns=[

2 changes: 1 addition & 1 deletion web_programming/get_imdb_top_250_movies_csv.py
@@ -8,7 +8,7 @@

 def get_imdb_top_250_movies(url: str = "") -> dict[str, float]:
     url = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
-    soup = BeautifulSoup(requests.get(url).text, "html.parser")
+    soup = BeautifulSoup(requests.get(url, timeout=10).text, "html.parser")
     titles = soup.find_all("td", attrs="titleColumn")
     ratings = soup.find_all("td", class_="ratingColumn imdbRating")
     return {

2 changes: 1 addition & 1 deletion web_programming/get_ip_geolocation.py
@@ -8,7 +8,7 @@ def get_ip_geolocation(ip_address: str) -> str:
     url = f"https://ipinfo.io/{ip_address}/json"

     # Send a GET request to the API
-    response = requests.get(url)
+    response = requests.get(url, timeout=10)

     # Check if the HTTP request was successful
     response.raise_for_status()

2 changes: 1 addition & 1 deletion web_programming/get_top_billionaires.py
@@ -57,7 +57,7 @@ def get_forbes_real_time_billionaires() -> list[dict[str, int | str]]:
     Returns:
         List of top 10 realtime billionaires data.
     """
-    response_json = requests.get(API_URL).json()
+    response_json = requests.get(API_URL, timeout=10).json()
     return [
         {
             "Name": person["personName"],

4 changes: 2 additions & 2 deletions web_programming/get_top_hn_posts.py
@@ -5,15 +5,15 @@

 def get_hackernews_story(story_id: str) -> dict:
     url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
-    return requests.get(url).json()
+    return requests.get(url, timeout=10).json()


 def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
     """
     Get the top max_stories posts from HackerNews - https://news.ycombinator.com/
     """
     url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
-    story_ids = requests.get(url).json()[:max_stories]
+    story_ids = requests.get(url, timeout=10).json()[:max_stories]
     return [get_hackernews_story(story_id) for story_id in story_ids]


2 changes: 1 addition & 1 deletion web_programming/giphy.py
@@ -11,7 +11,7 @@ def get_gifs(query: str, api_key: str = giphy_api_key) -> list:
     """
     formatted_query = "+".join(query.split())
     url = f"https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"
-    gifs = requests.get(url).json()["data"]
+    gifs = requests.get(url, timeout=10).json()["data"]
     return [gif["url"] for gif in gifs]


2 changes: 1 addition & 1 deletion web_programming/instagram_crawler.py
@@ -39,7 +39,7 @@ def get_json(self) -> dict:
         """
         Return a dict of user information
         """
-        html = requests.get(self.url, headers=headers).text
+        html = requests.get(self.url, headers=headers, timeout=10).text
         scripts = BeautifulSoup(html, "html.parser").find_all("script")
         try:
             return extract_user_profile(scripts[4])

4 changes: 2 additions & 2 deletions web_programming/instagram_pic.py
@@ -15,7 +15,7 @@ def download_image(url: str) -> str:
         A message indicating the result of the operation.
     """
     try:
-        response = requests.get(url)
+        response = requests.get(url, timeout=10)
         response.raise_for_status()
     except requests.exceptions.RequestException as e:
         return f"An error occurred during the HTTP request to {url}: {e!r}"
@@ -30,7 +30,7 @@ def download_image(url: str) -> str:
         return f"Image URL not found in meta tag {image_meta_tag}."

     try:
-        image_data = requests.get(image_url).content
+        image_data = requests.get(image_url, timeout=10).content
     except requests.exceptions.RequestException as e:
         return f"An error occurred during the HTTP request to {image_url}: {e!r}"
     if not image_data:

4 changes: 2 additions & 2 deletions web_programming/instagram_video.py
@@ -5,8 +5,8 @@


 def download_video(url: str) -> bytes:
     base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
-    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
-    return requests.get(video_url).content
+    video_url = requests.get(base_url + url, timeout=10).json()[0]["urls"][0]["src"]
+    return requests.get(video_url, timeout=10).content


6 changes: 3 additions & 3 deletions web_programming/nasa_data.py
@@ -9,14 +9,14 @@ def get_apod_data(api_key: str) -> dict:
     Get your API Key from: https://api.nasa.gov/
     """
     url = "https://api.nasa.gov/planetary/apod"
-    return requests.get(url, params={"api_key": api_key}).json()
+    return requests.get(url, params={"api_key": api_key}, timeout=10).json()


 def save_apod(api_key: str, path: str = ".") -> dict:
     apod_data = get_apod_data(api_key)
     img_url = apod_data["url"]
     img_name = img_url.split("/")[-1]
-    response = requests.get(img_url, stream=True)
+    response = requests.get(img_url, stream=True, timeout=10)

     with open(f"{path}/{img_name}", "wb+") as img_file:
         shutil.copyfileobj(response.raw, img_file)
@@ -29,7 +29,7 @@ def get_archive_data(query: str) -> dict:
     Get the data of a particular query from NASA archives
     """
     url = "https://images-api.nasa.gov/search"
-    return requests.get(url, params={"q": query}).json()
+    return requests.get(url, params={"q": query}, timeout=10).json()


 if __name__ == "__main__":

1 change: 1 addition & 0 deletions web_programming/open_google_results.py
@@ -16,6 +16,7 @@
     res = requests.get(
         url,
         headers={"User-Agent": str(UserAgent().random)},
+        timeout=10,
     )

     try:

6 changes: 4 additions & 2 deletions web_programming/random_anime_character.py
@@ -12,7 +12,7 @@ def save_image(image_url: str, image_title: str) -> None:
     """
     Saves the image of anime character
     """
-    image = requests.get(image_url, headers=headers)
+    image = requests.get(image_url, headers=headers, timeout=10)
     with open(image_title, "wb") as file:
         file.write(image.content)

@@ -21,7 +21,9 @@ def random_anime_character() -> tuple[str, str, str]:
     """
     Returns the Title, Description, and Image Title of a random anime character .
     """
-    soup = BeautifulSoup(requests.get(URL, headers=headers).text, "html.parser")
+    soup = BeautifulSoup(
+        requests.get(URL, headers=headers, timeout=10).text, "html.parser"
+    )
     title = soup.find("meta", attrs={"property": "og:title"}).attrs["content"]
     image_url = soup.find("meta", attrs={"property": "og:image"}).attrs["content"]
     description = soup.find("p", id="description").get_text()

4 changes: 3 additions & 1 deletion web_programming/recaptcha_verification.py
@@ -56,7 +56,9 @@ def login_using_recaptcha(request):
     client_key = request.POST.get("g-recaptcha-response")

     # post recaptcha response to Google's recaptcha api
-    response = requests.post(url, data={"secret": secret_key, "response": client_key})
+    response = requests.post(
+        url, data={"secret": secret_key, "response": client_key}, timeout=10
+    )
     # if the recaptcha api verified our keys
     if response.json().get("success", False):
         # authenticate the user