Commit 0216bcc

fix: ollama skip tests, pyproject whitespace diffs
1 parent: a88c93b · commit: 0216bcc

3 files changed: +204 -153 lines changed

pyproject.toml

Lines changed: 60 additions & 71 deletions
@@ -8,8 +8,10 @@ dynamic = ["version"]
 description = "A model-driven approach to building AI agents in just a few lines of code"
 readme = "README.md"
 requires-python = ">=3.10"
-license = { text = "Apache-2.0" }
-authors = [{ name = "AWS", email = "[email protected]" }]
+license = {text = "Apache-2.0"}
+authors = [
+    {name = "AWS", email = "[email protected]"},
+]
 classifiers = [
     "Development Status :: 3 - Alpha",
     "Intended Audience :: Developers",
@@ -26,7 +28,7 @@ classifiers = [
 dependencies = [
     "boto3>=1.26.0,<2.0.0",
     "botocore>=1.29.0,<2.0.0",
-    "docstring_parser>=0.15,<0.16.0",
+    "docstring_parser>=0.15,<1.0",
     "mcp>=1.8.0,<2.0.0",
     "pydantic>=2.0.0,<3.0.0",
     "typing-extensions>=4.13.2,<5.0.0",
@@ -44,7 +46,9 @@ Documentation = "https://strandsagents.com"
 packages = ["src/strands"]
 
 [project.optional-dependencies]
-anthropic = ["anthropic>=0.21.0,<1.0.0"]
+anthropic = [
+    "anthropic>=0.21.0,<1.0.0",
+]
 dev = [
     "commitizen>=4.4.0,<5.0.0",
     "hatch>=1.0.0,<2.0.0",
@@ -61,7 +65,7 @@ docs = [
     "sphinx-autodoc-typehints>=1.12.0,<2.0.0",
 ]
 litellm = [
-    "litellm>=1.69.0,<2.0.0",
+    "litellm>=1.72.6,<2.0.0",
 ]
 llamaapi = [
     "llama-api-client>=0.1.0,<1.0.0",
@@ -90,16 +94,25 @@ source = "vcs"
 [tool.hatch.envs.hatch-static-analysis]
 features = ["anthropic", "litellm", "llamaapi", "ollama", "openai", "otel"]
 dependencies = [
-  "mypy>=1.15.0,<2.0.0",
-  "ruff>=0.11.6,<0.12.0",
-  "strands-agents @ {root:uri}",
+    "mypy>=1.15.0,<2.0.0",
+    "ruff>=0.11.6,<0.12.0",
+    "strands-agents @ {root:uri}"
 ]
 
 [tool.hatch.envs.hatch-static-analysis.scripts]
-format-check = ["ruff format --check"]
-format-fix = ["ruff format"]
-lint-check = ["ruff check", "mypy -p src"]
-lint-fix = ["ruff check --fix"]
+format-check = [
+    "ruff format --check"
+]
+format-fix = [
+    "ruff format"
+]
+lint-check = [
+    "ruff check",
+    "mypy -p src"
+]
+lint-fix = [
+    "ruff check --fix"
+]
 
 [tool.hatch.envs.hatch-test]
 features = ["anthropic", "litellm", "llamaapi", "ollama", "openai", "otel"]
@@ -110,7 +123,11 @@ extra-dependencies = [
     "pytest-cov>=4.1.0,<5.0.0",
     "pytest-xdist>=3.0.0,<4.0.0",
 ]
-extra-args = ["-n", "auto", "-vv"]
+extra-args = [
+    "-n",
+    "auto",
+    "-vv",
+]
 
 [tool.hatch.envs.dev]
 dev-mode = true
@@ -120,14 +137,17 @@ features = ["dev", "docs", "anthropic", "litellm", "llamaapi", "ollama", "otel"]
 dev-mode = true
 features = ["dev", "docs", "anthropic", "litellm", "llamaapi", "ollama", "a2a"]
 
+
 [[tool.hatch.envs.hatch-test.matrix]]
 python = ["3.13", "3.12", "3.11", "3.10"]
 
 
 [tool.hatch.envs.hatch-test.scripts]
-run = ["pytest{env:HATCH_TEST_ARGS:} {args}"]
+run = [
+    "pytest{env:HATCH_TEST_ARGS:} {args}"
+]
 run-cov = [
-    "pytest{env:HATCH_TEST_ARGS:} --cov --cov-config=pyproject.toml {args}",
+    "pytest{env:HATCH_TEST_ARGS:} --cov --cov-config=pyproject.toml {args}"
 ]
 
 cov-combine = []
@@ -184,22 +204,17 @@ ignore_missing_imports = true
 
 [tool.ruff]
 line-length = 120
-include = [
-    "examples/**/*.py",
-    "src/**/*.py",
-    "tests/**/*.py",
-    "tests-integ/**/*.py",
-]
+include = ["examples/**/*.py", "src/**/*.py", "tests/**/*.py", "tests-integ/**/*.py"]
 
 [tool.ruff.lint]
 select = [
-    "B",   # flake8-bugbear
-    "D",   # pydocstyle
-    "E",   # pycodestyle
-    "F",   # pyflakes
-    "G",   # logging format
-    "I",   # isort
-    "LOG", # logging
+    "B", # flake8-bugbear
+    "D", # pydocstyle
+    "E", # pycodestyle
+    "F", # pyflakes
+    "G", # logging format
+    "I", # isort
+    "LOG", # logging
 ]
 
 [tool.ruff.lint.per-file-ignores]
@@ -209,7 +224,9 @@ select = [
 convention = "google"
 
 [tool.pytest.ini_options]
-testpaths = ["tests"]
+testpaths = [
+    "tests"
+]
 asyncio_default_fixture_loop_scope = "function"
 
 [tool.coverage.run]
@@ -232,47 +249,19 @@ output = "build/coverage/coverage.xml"
 name = "cz_conventional_commits"
 tag_format = "v$version"
 bump_message = "chore(release): bump version $current_version -> $new_version"
-version_files = ["pyproject.toml:version"]
+version_files = [
+    "pyproject.toml:version",
+]
 update_changelog_on_bump = true
 style = [
-    [
-        "qmark",
-        "fg:#ff9d00 bold",
-    ],
-    [
-        "question",
-        "bold",
-    ],
-    [
-        "answer",
-        "fg:#ff9d00 bold",
-    ],
-    [
-        "pointer",
-        "fg:#ff9d00 bold",
-    ],
-    [
-        "highlighted",
-        "fg:#ff9d00 bold",
-    ],
-    [
-        "selected",
-        "fg:#cc5454",
-    ],
-    [
-        "separator",
-        "fg:#cc5454",
-    ],
-    [
-        "instruction",
-        "",
-    ],
-    [
-        "text",
-        "",
-    ],
-    [
-        "disabled",
-        "fg:#858585 italic",
-    ],
-]
+    ["qmark", "fg:#ff9d00 bold"],
+    ["question", "bold"],
+    ["answer", "fg:#ff9d00 bold"],
+    ["pointer", "fg:#ff9d00 bold"],
+    ["highlighted", "fg:#ff9d00 bold"],
+    ["selected", "fg:#cc5454"],
+    ["separator", "fg:#cc5454"],
+    ["instruction", ""],
+    ["text", ""],
+    ["disabled", "fg:#858585 italic"]
+]
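
Only two changes above appear to be non-whitespace: the docstring_parser ceiling is relaxed from <0.16.0 to <1.0, and the litellm floor is raised to 1.72.6; everything else is formatting. As a minimal sketch, assuming the third-party "packaging" library is available, the new specifiers can be checked against an installed environment like this (illustrative script, not part of the commit):

# Illustrative only: verify installed versions against the commit's new bounds.
from importlib.metadata import version
from packaging.specifiers import SpecifierSet

NEW_BOUNDS = {
    "docstring_parser": SpecifierSet(">=0.15,<1.0"),  # previously <0.16.0
    "litellm": SpecifierSet(">=1.72.6,<2.0.0"),       # previously >=1.69.0
}

for name, spec in NEW_BOUNDS.items():
    installed = version(name)  # raises PackageNotFoundError if not installed
    print(f"{name} {installed}: {'ok' if installed in spec else 'out of range'}")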

tests-integ/test_model_ollama.py

Lines changed: 15 additions & 10 deletions
@@ -6,6 +6,13 @@
 from strands.models.ollama import OllamaModel
 
 
+def is_server_available() -> bool:
+    try:
+        return requests.get("http://localhost:11434").ok
+    except requests.exceptions.ConnectionError:
+        return False
+
+
 @pytest.fixture
 def model():
     return OllamaModel(host="http://localhost:11434", model_id="llama3.3:70b")
@@ -16,22 +23,20 @@ def agent(model):
     return Agent(model=model)
 
 
-@pytest.mark.skipif(
-    not requests.get("http://localhost:11434/api/health").ok,
-    reason="Local Ollama endpoint not available at localhost:11434",
-)
+@pytest.mark.skipif(not is_server_available(), reason="Local Ollama endpoint not available at localhost:11434")
 def test_agent(agent):
     result = agent("Say 'hello world' with no other text")
-    assert isinstance(result, str)
+    assert isinstance(result.message["content"][0]["text"], str)
 
 
-@pytest.mark.skipif(
-    not requests.get("http://localhost:11434/api/health").ok,
-    reason="Local Ollama endpoint not available at localhost:11434",
-)
+@pytest.mark.skipif(not is_server_available(), reason="Local Ollama endpoint not available at localhost:11434")
 def test_structured_output(agent):
     class Weather(BaseModel):
-        """Extract the time and weather from the response with the exact strings."""
+        """Extract the time and weather.
+
+        Time format: HH:MM
+        Weather: sunny, cloudy, rainy, etc.
+        """
 
         time: str
         weather: str
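
The substantive fix here: @pytest.mark.skipif evaluates its condition at import/collection time, and when nothing is listening on the port, requests.get raises ConnectionError rather than returning a non-ok response, so the old inline probe would presumably abort collection instead of skipping. Moving the probe into is_server_available() with a try/except turns a down server into a clean skip. The assertion change reflects that calling an Agent returns a result object whose text lives at result.message["content"][0]["text"], not a plain str. A standalone sketch of the skip pattern follows; the two-second timeout and the placeholder test are illustrative additions, not from the commit:

# Illustrative sketch of the collection-safe skip pattern used above.
import pytest
import requests

OLLAMA_URL = "http://localhost:11434"

def is_server_available() -> bool:
    try:
        # A GET on the Ollama root returns 200 when the server is running.
        return requests.get(OLLAMA_URL, timeout=2).ok  # timeout is an illustrative addition
    except requests.exceptions.ConnectionError:
        # Nothing listening on the port: report "unavailable" instead of raising.
        return False

@pytest.mark.skipif(not is_server_available(), reason=f"Local Ollama endpoint not available at {OLLAMA_URL}")
def test_server_responds():  # placeholder test, illustrative only
    assert requests.get(OLLAMA_URL, timeout=2).ok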
