diff --git a/.cursor/rules/examples-standards.mdc b/.cursor/rules/examples-standards.mdc
index d33faab..314c283 100644
--- a/.cursor/rules/examples-standards.mdc
+++ b/.cursor/rules/examples-standards.mdc
@@ -1,6 +1,6 @@
---
-description:
-globs:
+description: Standards for creating and maintaining examples for all functionality
+globs: examples/*
---
# Examples Standards
@@ -13,8 +13,6 @@ description: Standards for creating and maintaining examples for all functionali
filters:
- type: path
pattern: "^examples/.*"
- - type: path
- pattern: "^packages/.*/.*"
actions:
- type: suggest
diff --git a/.cursor/rules/new-stackone-package.mdc b/.cursor/rules/new-stackone-package.mdc
deleted file mode 100644
index f564f06..0000000
--- a/.cursor/rules/new-stackone-package.mdc
+++ /dev/null
@@ -1,69 +0,0 @@
----
-description: Standards for StackOne package structure
-globs: packages/stackone-*/**
----
-# StackOne Package Structure
-
-
-name: stackone_package_structure
-description: Standards for organizing StackOne packages
-
-filters:
- - type: path
- pattern: "^packages/stackone-.*"
-
-actions:
- - type: suggest
- message: |
- When creating a new StackOne package:
-
- 1. Package Structure:
- ```
- packages/stackone-{name}/
- ├── stackone_{name}/ # Package code (no src directory)
- │ ├── __init__.py
- │ └── ...
- ├── tests/ # Test files
- │ ├── __init__.py
- │ └── test_*.py
- ├── pyproject.toml # Package configuration
- └── README.md # Package documentation
- ```
-
- 2. Import paths:
- - Use absolute imports from package root
- - Example: `from stackone_ai.tools import Tool`
-
- 3. Resource files:
- - Place in package directory next to code
- - Example: `stackone_ai/oas/*.json`
-
- 4. Test files:
- - Place in tests directory
- - Name pattern: `test_*.py`
- - Use pytest fixtures and mocks
-
-examples:
- - input: |
- # Bad structure
- packages/stackone-core/
- ├── src/
- │ └── stackone_ai/
-
- # Good structure
- packages/stackone-core/
- ├── stackone_ai/
- │ ├── __init__.py
- │ ├── tools.py
- │ └── oas/
- │ └── crm.json
- output: "Correctly structured StackOne package"
-
-metadata:
- priority: high
- version: 1.0
- tags:
- - package
- - structure
- - python
-
diff --git a/.cursor/rules/package-installation.mdc b/.cursor/rules/package-installation.mdc
index c176461..d85adff 100644
--- a/.cursor/rules/package-installation.mdc
+++ b/.cursor/rules/package-installation.mdc
@@ -29,9 +29,6 @@ actions:
2. Package Level Dependencies:
```bash
- # Navigate to package directory
- cd packages/stackone-core
-
# Install package dependencies
uv add pydantic
uv add requests
@@ -52,7 +49,10 @@ actions:
uv run pytest
# Run specific package tests
- uv run pytest packages/stackone-core/tests/
+ uv run pytest stackone_ai
+
+ # Run tests on examples
+ uv run pytest examples
```
5. Package Dependencies:
@@ -72,7 +72,6 @@ examples:
uv add --dev black
# Good: Installing package dependencies
- cd packages/stackone-core
uv add pydantic
# Bad: Using pip install
diff --git a/.cursor/rules/test-standards.mdc b/.cursor/rules/test-standards.mdc
deleted file mode 100644
index 66a1c21..0000000
--- a/.cursor/rules/test-standards.mdc
+++ /dev/null
@@ -1,81 +0,0 @@
----
-description: Standards for running tests with UV in StackOne packages
-globs: packages/stackone-*/tests/**
----
-# Test Standards
-
-
-name: test_standards
-description: Standards for running and writing tests in StackOne packages
-
-filters:
- - type: file_extension
- pattern: "\\.py$"
- - type: path
- pattern: "^packages/stackone-.*/tests/.*"
-
-actions:
- - type: suggest
- message: |
- When working with tests:
-
- 1. Running Tests:
- ```bash
- # Run all tests
- uv run pytest
-
- # Run specific test file
- uv run pytest packages/stackone-core/tests/test_tools.py
-
- # Run with coverage
- uv run pytest --cov=stackone_ai
- ```
-
- 2. Test File Structure:
- - Place in package's tests directory
- - Name pattern: `test_*.py`
- - Group related tests in same file
- - Use descriptive test names
-
- 3. Test Dependencies:
- ```toml
- # In pyproject.toml
- [project.optional-dependencies]
- test = [
- "pytest>=7.0.0",
- "pytest-cov>=4.0.0",
- "pytest-asyncio>=0.23.0",
- ]
- ```
-
- 4. Test Guidelines:
- - Use pytest fixtures for reusable setup
- - Mock external dependencies
- - Test both success and error cases
- - Add type hints to fixtures and tests
-
-examples:
- - input: |
- # Good test structure
- from typing import Dict
- import pytest
-
- @pytest.fixture
- def mock_data() -> Dict:
- return {"test": "data"}
-
- def test_feature(mock_data: Dict):
- assert mock_data["test"] == "data"
-
- # Running tests
- # uv run pytest
- output: "Correctly structured and executed tests"
-
-metadata:
- priority: high
- version: 1.0
- tags:
- - testing
- - pytest
- - uv
-
\ No newline at end of file
diff --git a/.cursor/rules/uv-scripts.mdc b/.cursor/rules/uv-scripts.mdc
index bc74601..a35597d 100644
--- a/.cursor/rules/uv-scripts.mdc
+++ b/.cursor/rules/uv-scripts.mdc
@@ -14,7 +14,7 @@ filters:
- type: path
pattern: "^scripts/.*"
- type: exclude_path
- pattern: "^(packages|examples)/.*" # Exclude packages and examples
+ pattern: "^(stackone_ai|examples)/.*" # Exclude package and examples
actions:
- type: suggest
diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml
index 52b2754..ec59d87 100644
--- a/.github/workflows/lint.yml
+++ b/.github/workflows/lint.yml
@@ -26,4 +26,4 @@ jobs:
args: check .
- name: Run Mypy
- run: uv run mypy packages/stackone-ai/stackone_ai
+ run: uv run mypy stackone_ai
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index 8133c0d..f465e38 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -35,6 +35,5 @@ jobs:
env:
UV_PUBLISH_TOKEN: ${{ secrets.PYPI_API_TOKEN }}
run: |
- cd packages/stackone-ai
uv build --no-sources
uv publish
diff --git a/.gitignore b/.gitignore
index 271635c..28a513e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -11,3 +11,7 @@ __pycache__
# Documentation build
.docs/
site/
+
+*.egg-info
+dist/
+build/
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 66261db..a623acc 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -10,7 +10,7 @@ repos:
rev: v1.8.0
hooks:
- id: mypy
- files: ^packages/stackone-ai/stackone_ai/
+ files: ^stackone_ai/
additional_dependencies:
- types-requests
- types-PyYAML
diff --git a/.release-please-config.json b/.release-please-config.json
index dc5337e..85dd206 100644
--- a/.release-please-config.json
+++ b/.release-please-config.json
@@ -1,14 +1,14 @@
{
+ "release-type": "python",
+ "changelog-path": "CHANGELOG.md",
+ "bump-minor-pre-major": true,
+ "bump-patch-for-minor-pre-major": true,
+ "draft": false,
+ "prerelease": false,
+ "include-v-in-tag": true,
"packages": {
- "packages/stackone-ai": {
- "release-type": "python",
- "changelog-path": "CHANGELOG.md",
- "bump-minor-pre-major": true,
- "bump-patch-for-minor-pre-major": true,
- "draft": false,
- "prerelease": false,
- "include-component-in-tag": true,
- "include-v-in-tag": true
+ ".": {
+ "package-name": "stackone-ai"
}
},
"$schema": "https://raw.githubusercontent.com/googleapis/release-please/main/schemas/config.json"
diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index d588efe..40ff6fe 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- "packages/stackone-ai": "0.0.1"
+ ".": "0.0.2"
}
diff --git a/.vscode/settings.json b/.vscode/settings.json
index 6fa8d70..4a6e339 100644
--- a/.vscode/settings.json
+++ b/.vscode/settings.json
@@ -1,3 +1,4 @@
{
- "python.analysis.ignore": ["scripts/**"]
+ "python.analysis.ignore": ["scripts/**"],
+ "makefile.configureOnOpen": false
}
diff --git a/Makefile b/Makefile
index 95dff2f..b9aae47 100644
--- a/Makefile
+++ b/Makefile
@@ -5,8 +5,17 @@ install:
lint:
uv run ruff check .
+lint-fix:
+ uv run ruff check --fix .
+
test:
uv run pytest
+test-tools:
+ uv run pytest stackone_ai
+
+test-examples:
+ uv run pytest examples
+
mypy:
- uv run mypy packages/stackone-ai/stackone_ai
+ uv run mypy stackone_ai
diff --git a/README.md b/README.md
index 2cbf222..8994497 100644
--- a/README.md
+++ b/README.md
@@ -1,52 +1,56 @@
# StackOne AI SDK
+StackOne AI provides a unified interface for accessing various SaaS tools through AI-friendly APIs.
+
## Installation
```bash
pip install stackone-ai
```
-## Usage
+## Quick Start
```python
from stackone_ai import StackOneToolSet
+# Initialize with API key
+toolset = StackOneToolSet() # Uses STACKONE_API_KEY env var
+# Or explicitly: toolset = StackOneToolSet(api_key="your-api-key")
-```
-
-## Contributing
-
-### Prerequisites
+# Get HRIS-related tools
+tools = toolset.get_tools("hris_*", account_id="your-account-id")
-- [uv](https://docs.astral.sh/uv/getting-started/installation/)
-
-### Setup
-
-```bash
-git clone https://github.com/StackOneHQ/stackone-ai-python.git
-cd stackone-ai-python
+# Use a specific tool
+employee_tool = tools.get_tool("hris_get_employee")
+employee = employee_tool.execute({"id": "employee-id"})
```
-install dependencies and pre-commit hooks
+## Features
-```bash
-make install
-```
+- Unified interface for multiple SaaS tools
+- AI-friendly tool descriptions and parameters
+- Integration with popular AI frameworks:
+ - OpenAI Functions
+ - LangChain Tools
+ - CrewAI Tools
+ - LangGraph Tool Node
-### Run tests
+## Documentation
-```bash
-make test
-```
+For more examples and documentation, visit:
-### Run examples
+- [Error Handling](docs/error-handling.md)
+- [StackOne Account IDs](docs/stackone-account-ids.md)
+- [Available Tools](docs/available-tools.md)
+- [File Uploads](docs/file-uploads.md)
-```bash
-uv run examples/openai_tools_example.py
-```
+## AI Framework Integration
-## Todo
+- [OpenAI Integration](docs/openai-integration.md)
+- [LangChain Integration](docs/langchain-integration.md)
+- [CrewAI Integration](docs/crewai-integration.md)
+- [LangGraph Tool Node](docs/langgraph-tool-node.md)
-- [ ] Release please
+## License
-add release please to release the package to pypi.
+MIT License
diff --git a/examples/available_tools.py b/examples/available_tools.py
index ee58a0c..06c6898 100644
--- a/examples/available_tools.py
+++ b/examples/available_tools.py
@@ -1,16 +1,83 @@
"""
Get available tools from your StackOne organisation based on the account id.
+This example demonstrates different ways to filter and organize tools:
+1. Getting all available tools
+2. Filtering by vertical
+3. Using multiple patterns for cross-vertical functionality
+4. Filtering by specific operations
+5. Combining multiple operation patterns
+
+# TODO: experimental - get_available_tools(account_id="your_account_id")
+
```bash
uv run examples/available_tools.py
```
"""
+from dotenv import load_dotenv
+
+from stackone_ai import StackOneToolSet
+
+load_dotenv()
+
+
+def get_available_tools() -> None:
+ toolset = StackOneToolSet()
+
+ # First, get all tools
+ all_tools = toolset.get_tools()
+ assert len(all_tools) > 100, "Expected at least 100 tools in total"
+
+ # Then, let's get just HRIS tools using a vertical filter
+ hris_tools = toolset.get_tools("hris_*")
+ assert len(hris_tools) > 10, "Expected at least 10 HRIS tools"
+
+ # Now, let's get people-related tools across verticals
+ people_tools = toolset.get_tools(
+ [
+ "hris_*employee*",
+ "crm_*contact*",
+ ]
+ )
+ assert len(people_tools) > 20, "Expected at least 20 people-related tools"
+ for tool in people_tools:
+ assert "employee" in tool.name or "contact" in tool.name, (
+ f"Tool {tool.name} doesn't contain 'employee' or 'contact'"
+ )
+
+ # We can also filter by specific operations across all verticals
+ upload_tools = toolset.get_tools("*upload*")
+ assert len(upload_tools) > 0, "Expected at least one upload tool"
+ for tool in upload_tools:
+ assert "upload" in tool.name.lower(), f"Tool {tool.name} doesn't contain 'upload'"
+
+ # Get all tools except HRIS
+ non_hris_tools = toolset.get_tools("!hris_*")
+ assert len(non_hris_tools) > 0, "Expected at least one non-HRIS tool"
+ for tool in non_hris_tools:
+ assert not tool.name.startswith("hris_"), f"Tool {tool.name} should not be an HRIS tool"
-# TODO: Add examples
-def get_available_tools():
- print("Getting available tools")
+ # Complex filtering with positive and negative patterns
+ list_tools = toolset.get_tools(
+ [
+ "*list*", # Include list operations
+ "*search*", # Include search operations
+ "!*delete*", # Exclude delete operations
+ "!*remove*", # Exclude remove operations
+ ]
+ )
+ assert len(list_tools) > 0, "Expected at least one list/search tool"
+ for tool in list_tools:
+ # Should match positive patterns
+ assert any(op in tool.name.lower() for op in ["list", "search"]), (
+ f"Tool {tool.name} doesn't contain 'list' or 'search'"
+ )
+ # Should not match negative patterns
+ assert not any(op in tool.name.lower() for op in ["delete", "remove"]), (
+ f"Tool {tool.name} contains excluded operation"
+ )
if __name__ == "__main__":
- print(get_available_tools())
+ get_available_tools()
diff --git a/examples/crewai_integration.py b/examples/crewai_integration.py
index a23f11c..992e9eb 100644
--- a/examples/crewai_integration.py
+++ b/examples/crewai_integration.py
@@ -9,6 +9,7 @@
"""
from crewai import Agent, Crew, Task
+
from stackone_ai import StackOneToolSet
account_id = "45072196112816593343"
@@ -17,13 +18,16 @@
def crewai_integration():
toolset = StackOneToolSet()
- tools = toolset.get_tools(
- vertical="hris",
- account_id=account_id,
- )
+ tools = toolset.get_tools("hris_*", account_id=account_id)
# CrewAI uses LangChain tools natively
langchain_tools = tools.to_langchain()
+ assert len(langchain_tools) > 0, "Expected at least one LangChain tool"
+
+ for tool in langchain_tools:
+ assert hasattr(tool, "name"), "Expected tool to have name"
+ assert hasattr(tool, "description"), "Expected tool to have description"
+ assert hasattr(tool, "_run"), "Expected tool to have _run method"
agent = Agent(
role="HR Manager",
@@ -42,7 +46,9 @@ def crewai_integration():
)
crew = Crew(agents=[agent], tasks=[task])
- print(crew.kickoff())
+
+ result = crew.kickoff()
+ assert result is not None, "Expected result to be returned"
if __name__ == "__main__":
diff --git a/examples/error_handling.py b/examples/error_handling.py
index 1202efe..b1f4b1c 100644
--- a/examples/error_handling.py
+++ b/examples/error_handling.py
@@ -1,38 +1,65 @@
+"""
+This example demonstrates error handling when using the StackOne SDK.
+
+Run the following command to see the output:
+
+```bash
+uv run examples/error_handling.py
+```
+"""
+
+import os
+
from dotenv import load_dotenv
+
from stackone_ai import StackOneToolSet
+from stackone_ai.models import StackOneAPIError
+from stackone_ai.toolset import ToolsetConfigError, ToolsetLoadError
load_dotenv()
def error_handling() -> None:
+ # Example 1: Configuration error - missing API key
+ original_api_key = os.environ.pop("STACKONE_API_KEY", None)
+ try:
+ try:
+ StackOneToolSet(api_key=None)
+ raise AssertionError("Expected ToolsetConfigError")
+ except ToolsetConfigError as e:
+ assert (
+ str(e)
+ == "API key must be provided either through api_key parameter or STACKONE_API_KEY environment variable"
+ )
+ finally:
+ if original_api_key:
+ os.environ["STACKONE_API_KEY"] = original_api_key
+
+ # Example 2: Invalid vertical error
toolset = StackOneToolSet()
+ try:
+ # Use a non-existent vertical to trigger error
+ tools = toolset.get_tools("nonexistent_vertical_*")
+ # If we get here, no tools were found but no error was raised
+ assert len(tools) == 0, "Expected no tools for nonexistent vertical"
+ except ToolsetLoadError as e:
+ assert "Error loading tools" in str(e)
- # Example 1: Handle unknown vertical
- tools = toolset.get_tools(vertical="unknown_vertical")
- print("Tools for unknown vertical:", tools._tool_map)
- # {}
+ # Example 3: API error - invalid request
+ toolset = StackOneToolSet()
+ tools = toolset.get_tools("crm_*")
+
+ # Try to make an API call without required parameters
+ list_contacts = tools.get_tool("crm_list_contacts")
+ assert list_contacts is not None, "Expected crm_list_contacts tool to exist"
- # Example 2: Handle API errors with account_id
- tools = toolset.get_tools(vertical="crm", account_id="test_id")
- try:
- # Try with invalid ID
- contacts_tool = tools.get_tool("get_contact")
- if contacts_tool:
- result = contacts_tool.execute({"id": "invalid_id"})
- except Exception as e:
- print(f"API Error: {e}")
- # 400 Client Error: Bad Request for url: https://api.stackone.com/unified/crm/contacts/invalid_id
-
- # Example 3: Handle missing account ID
- tools_no_account = toolset.get_tools(vertical="crm", account_id=None)
try:
- list_contacts_tool = tools_no_account.get_tool("list_contacts")
- if list_contacts_tool:
- result = list_contacts_tool.execute()
- print("Result without account ID:", result)
- except Exception as e:
- print(f"Error when account ID is missing: {e}")
- # 501 Server Error: Not Implemented for url: https://api.stackone.com/unified/crm/contacts
+ # Execute without required parameters should raise error
+ list_contacts.execute({})
+ raise AssertionError("Expected StackOneAPIError")
+ except StackOneAPIError as e:
+ assert e.status_code >= 400, "Expected error status code"
+ assert e.response_body is not None, "Expected error response body"
if __name__ == "__main__":
diff --git a/examples/file_uploads.py b/examples/file_uploads.py
new file mode 100644
index 0000000..ba8fd6e
--- /dev/null
+++ b/examples/file_uploads.py
@@ -0,0 +1,71 @@
+"""
+Example demonstrating file upload functionality with StackOne.
+Shows how to upload an employee document using the HRIS integration.
+
+This example is runnable with the following command:
+```bash
+uv run examples/file_uploads.py
+```
+"""
+
+import base64
+import tempfile
+from pathlib import Path
+
+from dotenv import load_dotenv
+
+from stackone_ai import StackOneToolSet
+
+load_dotenv()
+
+account_id = "45072196112816593343"
+employee_id = "c28xIQaWQ6MzM5MzczMDA2NzMzMzkwNzIwNA"
+
+
+def upload_employee_document() -> None:
+ """Demonstrate uploading an employee document using StackOne."""
+ with tempfile.TemporaryDirectory() as temp_dir:
+ resume_content = """
+ JOHN DOE
+ Software Engineer
+
+ EXPERIENCE
+ Senior Developer - Tech Corp
+ 2020-Present
+ - Led development of core features
+ - Managed team of 5 engineers
+
+ EDUCATION
+ BS Computer Science
+ University of Technology
+ 2016-2020
+ """
+
+    resume_file = Path(temp_dir) / "resume.txt"
+ resume_file.write_text(resume_content)
+
+ toolset = StackOneToolSet()
+ tools = toolset.get_tools("hris_*", account_id=account_id)
+
+ upload_tool = tools.get_tool("hris_upload_employee_document")
+ assert upload_tool is not None
+
+ with open(resume_file, "rb") as f:
+ file_content = base64.b64encode(f.read()).decode()
+
+ upload_params = {
+ "x-account-id": account_id,
+ "id": employee_id,
+ "name": "resume",
+ "content": file_content,
+ "category": {"value": "shared"},
+ "file_format": {"value": "txt"},
+ }
+
+ result = upload_tool.execute(upload_params)
+ assert result is not None
+ assert result.get("message") == "File uploaded successfully"
+
+
+if __name__ == "__main__":
+ upload_employee_document()
diff --git a/examples/index.py b/examples/index.py
index c28b091..b20ea73 100644
--- a/examples/index.py
+++ b/examples/index.py
@@ -13,9 +13,16 @@
## Quick Start
Here's a simple example. All examples are complete and runnable.
+
+You can even run the example directly from the command line:
+
+```bash
+uv run examples/index.py
+```
"""
from dotenv import load_dotenv
+
from stackone_ai import StackOneToolSet
"""
@@ -45,14 +52,15 @@
def quickstart():
toolset = StackOneToolSet()
- # Filter by vertical and add the account ID
- tools = toolset.get_tools(vertical="hris", account_id=account_id)
+ # Get all HRIS-related tools
+ tools = toolset.get_tools("hris_*", account_id=account_id)
# Use a specific tool
- employee_tool = tools.get_tool("get_employee")
- if employee_tool:
- employee = employee_tool.execute({"id": employee_id})
- print(employee)
+ employee_tool = tools.get_tool("hris_get_employee")
+ assert employee_tool is not None
+
+ employee = employee_tool.execute({"id": employee_id})
+ assert employee is not None
if __name__ == "__main__":
@@ -61,10 +69,15 @@ def quickstart():
"""
## Next Steps
-Check out some examples:
+Check out some more documentation:
+
- [Error Handling](error-handling.md)
-- [StackOne Account IDs](stackone_account_ids.md)
-- [Available Tools](available_tools.md)
+- [StackOne Account IDs](stackone-account-ids.md)
+- [Available Tools](available-tools.md)
+- [File Uploads](file-uploads.md)
+
+Or get started with an integration:
+
- [OpenAI Integration](openai-integration.md)
- [LangChain Integration](langchain-integration.md)
- [CrewAI Integration](crewai-integration.md)
diff --git a/examples/langchain_integration.py b/examples/langchain_integration.py
index 72ec071..af5c28c 100644
--- a/examples/langchain_integration.py
+++ b/examples/langchain_integration.py
@@ -8,6 +8,7 @@
from dotenv import load_dotenv
from langchain_openai import ChatOpenAI
+
from stackone_ai import StackOneToolSet
load_dotenv()
@@ -18,10 +19,20 @@
def langchain_integration() -> None:
toolset = StackOneToolSet()
- tools = toolset.get_tools(vertical="hris", account_id=account_id)
+ tools = toolset.get_tools("hris_*", account_id=account_id)
+ # Convert to LangChain format and verify
langchain_tools = tools.to_langchain()
+ assert len(langchain_tools) > 0, "Expected at least one LangChain tool"
+
+ # Verify tool structure
+ for tool in langchain_tools:
+ assert hasattr(tool, "name"), "Expected tool to have name"
+ assert hasattr(tool, "description"), "Expected tool to have description"
+ assert hasattr(tool, "_run"), "Expected tool to have _run method"
+ assert hasattr(tool, "args_schema"), "Expected tool to have args_schema"
+ # Create model with tools
model = ChatOpenAI(model="gpt-4o-mini")
model_with_tools = model.bind_tools(langchain_tools)
@@ -31,7 +42,9 @@ def langchain_integration() -> None:
for tool_call in result.tool_calls:
tool = tools.get_tool(tool_call["name"])
if tool:
- print(tool.execute(tool_call["args"]))
+ result = tool.execute(tool_call["args"])
+ assert result is not None
+ assert result.get("data") is not None
if __name__ == "__main__":
diff --git a/examples/langgraph_tool_node.py b/examples/langgraph_tool_node.py
index 95c0ac0..3df8bb9 100644
--- a/examples/langgraph_tool_node.py
+++ b/examples/langgraph_tool_node.py
@@ -1,4 +1,6 @@
"""
+TODO: add the LangGraph-specific integration (ToolNode wiring) — see note in langgraph_tool_node().
+
This example demonstrates how to use StackOne tools with LangGraph.
```bash
@@ -6,10 +8,31 @@
```
"""
+from dotenv import load_dotenv
+
+from stackone_ai import StackOneToolSet
+
+load_dotenv()
+
+account_id = "45072196112816593343"
+employee_id = "c28xIQaWQ6MzM5MzczMDA2NzMzMzkwNzIwNA"
+
-# TODO: Add examples
def langgraph_tool_node() -> None:
- print("LangGraph tool node")
+ """Demonstrate basic LangGraph integration with StackOne tools."""
+ toolset = StackOneToolSet()
+ tools = toolset.get_tools("hris_*", account_id=account_id)
+
+ # Verify we have the tools we need
+ assert len(tools) > 0, "Expected at least one HRIS tool"
+ employee_tool = tools.get_tool("hris_get_employee")
+ assert employee_tool is not None, "Expected hris_get_employee tool"
+
+ # TODO: Add LangGraph specific integration
+ # For now, just verify the tools are properly configured
+ langchain_tools = tools.to_langchain()
+ assert len(langchain_tools) > 0, "Expected LangChain tools"
+ assert all(hasattr(tool, "_run") for tool in langchain_tools), "Expected all tools to have _run method"
if __name__ == "__main__":
diff --git a/examples/openai_integration.py b/examples/openai_integration.py
index 9538355..e8c479c 100644
--- a/examples/openai_integration.py
+++ b/examples/openai_integration.py
@@ -1,13 +1,17 @@
"""
This example demonstrates how to use StackOne tools with OpenAI's function calling.
+This example is runnable with the following command:
```bash
uv run examples/openai_integration.py
```
+
+You can find out more about the OpenAI Function Calling API format [here](https://platform.openai.com/docs/guides/function-calling).
"""
from dotenv import load_dotenv
from openai import OpenAI
+
from stackone_ai import StackOneToolSet
load_dotenv()
@@ -28,7 +32,16 @@ def handle_tool_calls(tools, tool_calls) -> list[dict]:
def openai_integration() -> None:
client = OpenAI()
toolset = StackOneToolSet()
- tools = toolset.get_tools(vertical="hris", account_id=account_id)
+
+ # Filter tools to only the ones we need to avoid context window limits
+ tools = toolset.get_tools(
+ [
+ "hris_get_employee",
+ "hris_list_employee_employments",
+ "hris_get_employee_employment",
+ ],
+ account_id=account_id,
+ )
openai_tools = tools.to_openai()
messages = [
@@ -39,33 +52,41 @@ def openai_integration() -> None:
},
]
- while True:
- response = client.chat.completions.create(
- model="gpt-4o-mini",
- messages=messages,
- tools=openai_tools,
- tool_choice="auto",
- )
-
- if not response.choices[0].message.tool_calls:
- print("Response:", response.choices[0].message.content)
- break
-
- results = handle_tool_calls(tools, response.choices[0].message.tool_calls)
- if not results:
- print("Error: Failed to execute tools")
- break
-
- messages.extend(
- [
- {"role": "assistant", "content": None, "tool_calls": response.choices[0].message.tool_calls},
- {
- "role": "tool",
- "tool_call_id": response.choices[0].message.tool_calls[0].id,
- "content": str(results[0]),
- },
- ]
- )
+ response = client.chat.completions.create(
+ model="gpt-4o-mini",
+ messages=messages,
+ tools=openai_tools,
+ tool_choice="auto",
+ )
+
+ # Verify we got a response with tool calls
+ assert response.choices[0].message.tool_calls is not None, "Expected tool calls in response"
+
+ # Handle the tool calls and verify results
+ results = handle_tool_calls(tools, response.choices[0].message.tool_calls)
+ assert results is not None and len(results) > 0, "Expected tool call results"
+ assert "data" in results[0], "Expected data in tool call result"
+
+ # Verify we can continue the conversation with the results
+ messages.extend(
+ [
+ {"role": "assistant", "content": None, "tool_calls": response.choices[0].message.tool_calls},
+ {
+ "role": "tool",
+ "tool_call_id": response.choices[0].message.tool_calls[0].id,
+ "content": str(results[0]),
+ },
+ ]
+ )
+
+ # Verify the final response
+ final_response = client.chat.completions.create(
+ model="gpt-4o-mini",
+ messages=messages,
+ tools=openai_tools,
+ tool_choice="auto",
+ )
+ assert final_response.choices[0].message.content is not None, "Expected final response content"
if __name__ == "__main__":
diff --git a/examples/stackone_account_ids.py b/examples/stackone_account_ids.py
index 4a5d9ef..cfa2fca 100644
--- a/examples/stackone_account_ids.py
+++ b/examples/stackone_account_ids.py
@@ -7,6 +7,7 @@
"""
from dotenv import load_dotenv
+
from stackone_ai import StackOneToolSet
load_dotenv()
@@ -15,18 +16,18 @@
def stackone_account_ids():
toolset = StackOneToolSet()
- # Filter by vertical and set the account ID
- tools = toolset.get_tools(vertical="hris", account_id="test_id")
+ # Filter by pattern and set the account ID
+ tools = toolset.get_tools("hris_*", account_id="test_id")
# You can over write the account ID here..
tools.set_account_id("a_different_id")
- employee_tool = tools.get_tool("get_employee")
- if employee_tool:
- # You can even set the account ID on a per-tool basis
- employee_tool.set_account_id("again_another_id")
+ employee_tool = tools.get_tool("hris_get_employee")
+ assert employee_tool is not None
- print(employee_tool.get_account_id())
+ # You can even set the account ID on a per-tool basis
+ employee_tool.set_account_id("again_another_id")
+ assert employee_tool.get_account_id() == "again_another_id"
if __name__ == "__main__":
diff --git a/mkdocs.yml b/mkdocs.yml
index d04ecc8..3c5faf7 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -48,5 +48,6 @@ nav:
- OpenAI Integration: openai-integration.md
- CrewAI Integration: crewai-integration.md
- LangChain Integration: langchain-integration.md
- - StackOne Account IDs: stackone_account_ids.md
- - Error Handling: error-handling.md
+ - StackOne Account IDs: stackone-account-ids.md
+ - Error Handling: error-handling.md
+ - File Uploads: file-uploads.md
diff --git a/packages/stackone-ai/README.md b/packages/stackone-ai/README.md
deleted file mode 100644
index 1a81ea0..0000000
--- a/packages/stackone-ai/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# StackOne AI
-
-> StackOne AI is a Python library for building AI agents with StackOne.
diff --git a/packages/stackone-ai/pyproject.toml b/packages/stackone-ai/pyproject.toml
deleted file mode 100644
index d9353d0..0000000
--- a/packages/stackone-ai/pyproject.toml
+++ /dev/null
@@ -1,18 +0,0 @@
-[project]
-name = "stackone-ai"
-version = "0.0.1"
-description = "tools for stackone"
-readme = "README.md"
-requires-python = ">=3.11"
-dependencies = [
- "pydantic>=2.10.6",
- "requests>=2.32.3",
- "langchain-core>=0.1.0",
-]
-
-[build-system]
-requires = ["hatchling"]
-build-backend = "hatchling.build"
-
-[tool.uv]
-package = true
diff --git a/packages/stackone-ai/stackone_ai/models.py b/packages/stackone-ai/stackone_ai/models.py
deleted file mode 100644
index 006dcce..0000000
--- a/packages/stackone-ai/stackone_ai/models.py
+++ /dev/null
@@ -1,98 +0,0 @@
-from collections.abc import Sequence
-from typing import Any
-
-from langchain_core.tools import BaseTool
-from pydantic import BaseModel, Field
-
-
-class ExecuteConfig(BaseModel):
- headers: dict = Field(default_factory=dict) # Keep this with default empty dict
- method: str
- url: str
- name: str
- body_type: str | None = None
- parameter_locations: dict[str, str] = Field(
- default_factory=dict
- ) # Maps param name to location (header, query, path)
-
-
-class ToolParameters(BaseModel):
- type: str
- properties: dict
-
-
-class ToolDefinition(BaseModel):
- description: str
- parameters: ToolParameters
- execute: ExecuteConfig
-
-
-class Tool(BaseModel):
- """Base Tool model"""
-
- name: str
- description: str
- parameters: ToolParameters
-
- def execute(self, arguments: str | dict | None = None) -> dict[str, Any]:
- """Execute the tool with the given parameters"""
- raise NotImplementedError
-
- def to_openai_function(self) -> dict:
- """Convert this tool to OpenAI's function format"""
- raise NotImplementedError
-
- def set_account_id(self, account_id: str | None) -> None:
- """Set the account ID for this tool."""
- raise NotImplementedError
-
- def get_account_id(self) -> str | None:
- """Get the current account ID for this tool."""
- raise NotImplementedError
-
- def to_langchain(self) -> Any:
- """Convert this tool to LangChain format"""
- raise NotImplementedError
-
-
-class Tools:
- """Container for Tool instances"""
-
- def __init__(self, tools: list[Tool]):
- self.tools = tools
- self._tool_map = {tool.name: tool for tool in tools}
-
- def __getitem__(self, index: int) -> Tool:
- return self.tools[index]
-
- def __len__(self) -> int:
- return len(self.tools)
-
- def get_tool(self, name: str) -> Tool | None:
- """Get a tool by its name"""
- return self._tool_map.get(name)
-
- def set_account_id(self, account_id: str | None) -> None:
- """Set the account ID for all tools in this collection.
-
- Args:
- account_id: The account ID to use, or None to clear it
- """
- for tool in self.tools:
- tool.set_account_id(account_id)
-
- def get_account_id(self) -> str | None:
- """Get the current account ID for this tool."""
- for tool in self.tools:
- account_id = tool.get_account_id()
- if isinstance(account_id, str): # Type guard to ensure we return str | None
- return account_id
- return None
-
- def to_openai(self) -> list[dict]:
- """Convert all tools to OpenAI function format"""
- return [tool.to_openai_function() for tool in self.tools]
-
- def to_langchain(self) -> Sequence[BaseTool]:
- """Convert all tools to LangChain format"""
- return [tool.to_langchain() for tool in self.tools]
diff --git a/packages/stackone-ai/stackone_ai/tools.py b/packages/stackone-ai/stackone_ai/tools.py
deleted file mode 100644
index 92e609b..0000000
--- a/packages/stackone-ai/stackone_ai/tools.py
+++ /dev/null
@@ -1,194 +0,0 @@
-import base64
-import json
-from typing import Annotated, Any
-
-import requests
-from langchain_core.tools import BaseTool
-from pydantic import BaseModel, Field, PrivateAttr
-
-from stackone_ai.models import (
- ExecuteConfig,
- ToolParameters,
-)
-from stackone_ai.models import (
- Tool as StackOneBaseTool,
-)
-
-
-class StackOneTool(StackOneBaseTool):
- """Concrete implementation of StackOne Tool"""
-
- name: str = Field(description="Tool name")
- description: str = Field(description="Tool description")
- parameters: ToolParameters = Field(description="Tool parameters")
- _execute_config: ExecuteConfig = PrivateAttr()
- _api_key: str = PrivateAttr()
- _account_id: str | None = PrivateAttr(default=None)
-
- def __init__(
- self,
- description: str,
- parameters: ToolParameters,
- _execute_config: ExecuteConfig,
- _api_key: str,
- _account_id: str | None = None,
- ) -> None:
- super().__init__(
- name=_execute_config.name,
- description=description,
- parameters=parameters,
- )
- self._execute_config = _execute_config
- self._api_key = _api_key
- self._account_id = _account_id
-
- def execute(self, arguments: str | dict | None = None) -> dict[str, Any]:
- """Execute the tool with the given parameters"""
- # Handle both string and dict arguments
- if isinstance(arguments, str):
- kwargs = json.loads(arguments)
- else:
- kwargs = arguments or {}
-
- # Create basic auth header with API key as username
- auth_string = base64.b64encode(f"{self._api_key}:".encode()).decode()
-
- headers = {
- "Authorization": f"Basic {auth_string}",
- "User-Agent": "stackone-python/1.0.0",
- }
-
- if self._account_id:
- headers["x-account-id"] = self._account_id
-
- # Add predefined headers
- headers.update(self._execute_config.headers)
-
- url = self._execute_config.url
- body_params = {}
- query_params = {}
-
- # Handle parameters based on their location
- for key, value in kwargs.items():
- param_location = self._execute_config.parameter_locations.get(key)
-
- if param_location == "path":
- url = url.replace(f"{{{key}}}", str(value))
- elif param_location == "query":
- query_params[key] = value
- elif param_location == "body":
- body_params[key] = value
- else:
- # Default behavior
- if f"{{{key}}}" in url:
- url = url.replace(f"{{{key}}}", str(value))
- elif self._execute_config.method.upper() in ["GET", "DELETE"]:
- query_params[key] = value
- else:
- body_params[key] = value
-
- request_kwargs: dict[str, Any] = {
- "method": self._execute_config.method,
- "url": url,
- "headers": headers,
- }
-
- if body_params:
- body_type = self._execute_config.body_type or "json"
- if body_type == "json":
- request_kwargs["json"] = body_params
- elif body_type == "form":
- request_kwargs["data"] = body_params
-
- if query_params:
- request_kwargs["params"] = query_params
-
- response = requests.request(**request_kwargs)
- response.raise_for_status()
-
- # Ensure we return a dict
- result = response.json()
- if not isinstance(result, dict):
- return {"result": result}
- return result
-
- def to_openai_function(self) -> dict:
- """Convert this tool to OpenAI's function format"""
- return {
- "type": "function",
- "function": {
- "name": self.name,
- "description": self.description,
- "parameters": {
- "type": self.parameters.type,
- "properties": self.parameters.properties,
- "required": list(self.parameters.properties.keys()),
- "additionalProperties": False,
- },
- "strict": True,
- },
- }
-
- def to_langchain(self) -> BaseTool:
- """Convert this tool to LangChain format"""
- tool_self = self # Capture self reference for inner class
-
- # Create properly annotated schema for the tool
- schema_props: dict[str, Any] = {}
- annotations: dict[str, Any] = {}
-
- for name, details in self.parameters.properties.items():
- python_type: type = str # Default to str
- if isinstance(details, dict):
- type_str = details.get("type", "string")
- if type_str == "number":
- python_type = float
- elif type_str == "integer":
- python_type = int
- elif type_str == "boolean":
- python_type = bool
-
- field = Field(description=details.get("description", ""))
- else:
- field = Field(description="")
-
- schema_props[name] = field
- annotations[name] = Annotated[python_type, field]
-
- # Create the schema class with proper annotations
- schema_class = type(
- f"{self.name.title()}Args",
- (BaseModel,),
- {
- "__annotations__": annotations,
- "__module__": __name__,
- **schema_props,
- },
- )
-
- class StackOneLangChainTool(BaseTool):
- name: str = tool_self.name
- description: str = tool_self.description
- args_schema: type[BaseModel] = schema_class
- return_direct: bool = True
- func = staticmethod(tool_self.execute)
-
- def _run(self, **kwargs: Any) -> Any:
- return tool_self.execute(kwargs)
-
- async def _arun(self, **kwargs: Any) -> Any:
- return self._run(**kwargs)
-
- return StackOneLangChainTool()
-
- def set_account_id(self, account_id: str | None) -> None:
- """Set the account ID for this tool.
-
- Args:
- account_id: The account ID to use, or None to clear it
- """
- self._account_id = account_id
-
- def get_account_id(self) -> str | None:
- """Get the current account ID for this tool."""
- return self._account_id
diff --git a/packages/stackone-ai/stackone_ai/toolset.py b/packages/stackone-ai/stackone_ai/toolset.py
deleted file mode 100644
index bc1da4d..0000000
--- a/packages/stackone-ai/stackone_ai/toolset.py
+++ /dev/null
@@ -1,100 +0,0 @@
-import json
-import os
-from typing import Any
-
-from stackone_ai.constants import OAS_DIR
-from stackone_ai.models import (
- ExecuteConfig,
- ToolDefinition,
- ToolParameters,
- Tools,
-)
-from stackone_ai.models import (
- Tool as StackOneBaseTool,
-)
-from stackone_ai.tools import StackOneTool
-
-
-class StackOneToolSet:
- """Main class for accessing StackOne tools"""
-
- def __init__(
- self,
- api_key: str | None = None,
- account_id: str | None = None,
- ) -> None:
- """Initialize StackOne tools with authentication.
-
- Args:
- api_key: Optional API key. If not provided, will try to get from STACKONE_API_KEY env var
- account_id: Optional account ID. If not provided, will try to get from STACKONE_ACCOUNT_ID env var
- """
- api_key_value = api_key or os.getenv("STACKONE_API_KEY")
- if not api_key_value:
- raise ValueError(
- "API key must be provided either through api_key parameter or "
- "STACKONE_API_KEY environment variable"
- )
- self.api_key: str = api_key_value # Type annotation ensures it's a string
- self.account_id = account_id or os.getenv("STACKONE_ACCOUNT_ID")
-
- def get_tools(self, vertical: str, account_id: str | None = None) -> Tools:
- """Get tools for a specific vertical.
-
- Args:
- vertical: The vertical to get tools for (e.g. "hris", "crm")
- account_id: Optional account ID override. If not provided, uses the one from initialization
- """
- spec_path = OAS_DIR / f"{vertical}.json"
- if not spec_path.exists():
- return Tools([]) # Return empty tools list for unknown vertical
-
- # Use account_id parameter if provided, otherwise use the one from initialization
- effective_account_id = account_id or self.account_id
-
- with open(spec_path) as f:
- spec = json.load(f)
-
- tools: list[StackOneBaseTool] = []
- paths = spec.get("paths", {})
-
- for path, methods in paths.items():
- for method, details in methods.items():
- # Skip if no x-speakeasy-name-override (indicates not a tool endpoint)
- if "x-speakeasy-name-override" not in details:
- continue
-
- name = details["x-speakeasy-name-override"]
- description = details.get("description", "")
- parameters = details.get("parameters", [])
-
- # Convert OpenAPI parameters to JSON Schema
- properties: dict[str, Any] = {}
- for param in parameters:
- if param["in"] == "path":
- properties[param["name"]] = {
- "type": param["schema"]["type"],
- "description": param.get("description", ""),
- }
-
- tool_def = ToolDefinition(
- description=description,
- parameters=ToolParameters(type="object", properties=properties),
- execute=ExecuteConfig(
- headers={},
- method=method.upper(),
- url=f"https://api.stackone.com{path}",
- name=name,
- ),
- )
-
- tool = StackOneTool(
- description=tool_def.description,
- parameters=tool_def.parameters,
- _execute_config=tool_def.execute,
- _api_key=self.api_key,
- _account_id=effective_account_id,
- )
- tools.append(tool)
-
- return Tools(tools)
diff --git a/packages/stackone-ai/tests/test_toolset.py b/packages/stackone-ai/tests/test_toolset.py
deleted file mode 100644
index 5238804..0000000
--- a/packages/stackone-ai/tests/test_toolset.py
+++ /dev/null
@@ -1,59 +0,0 @@
-from unittest.mock import MagicMock, patch
-
-from stackone_ai.toolset import StackOneToolSet
-
-
-def test_toolset_initialization():
- """Test StackOneToolSet initialization and tool creation"""
- mock_spec_content = {
- "paths": {
- "/employee/{id}": {
- "get": {
- "x-speakeasy-name-override": "get_employee",
- "description": "Get employee details",
- "parameters": [
- {
- "in": "path",
- "name": "id",
- "schema": {"type": "string"},
- "description": "Employee ID",
- }
- ],
- }
- }
- }
- }
-
- # Mock the file operations instead of load_specs
- with (
- patch("stackone_ai.toolset.OAS_DIR") as mock_dir,
- patch("json.load") as mock_json,
- ):
- # Setup mocks
- mock_path = MagicMock()
- mock_path.exists.return_value = True
- mock_dir.__truediv__.return_value = mock_path
- mock_json.return_value = mock_spec_content
-
- # Create and test toolset
- toolset = StackOneToolSet(api_key="test_key")
- tools = toolset.get_tools(vertical="hris", account_id="test_account")
-
- # Verify results
- assert len(tools) == 1
- tool = tools.get_tool("get_employee")
- assert tool is not None
- assert tool.description == "Get employee details"
- assert tool._api_key == "test_key"
- assert tool._account_id == "test_account"
-
- # Verify the tool parameters
- assert tool.parameters.properties["id"]["type"] == "string"
- assert tool.parameters.properties["id"]["description"] == "Employee ID"
-
-
-def test_unknown_vertical():
- """Test getting tools for unknown vertical"""
- toolset = StackOneToolSet(api_key="test_key")
- tools = toolset.get_tools(vertical="unknown")
- assert len(tools) == 0
diff --git a/pyproject.toml b/pyproject.toml
index a565084..1df2d34 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,9 +1,32 @@
[project]
-name = "stackone_ai-python"
-version = "0.1.0"
+name = "stackone-ai"
+version = "0.0.2"
description = "agents performing actions on your SaaS"
readme = "README.md"
requires-python = ">=3.11"
+authors = [
+ { name = "StackOne", email = "support@stackone.com" }
+]
+classifiers = [
+ "Development Status :: 4 - Beta",
+ "Intended Audience :: Developers",
+ "License :: OSI Approved :: MIT License",
+ "Programming Language :: Python :: 3",
+ "Programming Language :: Python :: 3.11",
+ "Topic :: Software Development :: Libraries :: Python Modules",
+]
+dependencies = [
+ "pydantic>=2.10.6",
+ "requests>=2.32.3",
+ "langchain-core>=0.1.0",
+]
+
+[build-system]
+requires = ["hatchling"]
+build-backend = "hatchling.build"
+
+[tool.hatch.build.targets.wheel]
+packages = ["stackone_ai"]
[project.optional-dependencies]
examples = [
@@ -19,12 +42,6 @@ pymdown-extensions = [
"mkdocs-material>=9.6.4",
]
-[tool.uv.workspace]
-members = ["packages/stackone-ai"]
-
-[tool.uv.sources]
-stackone-ai = { workspace = true }
-
[dependency-groups]
dev = [
"mypy>=1.15.0",
@@ -39,9 +56,6 @@ dev = [
]
[tool.pytest.ini_options]
-pythonpath = [
- "packages/stackone-ai",
-]
asyncio_mode = "strict"
asyncio_default_fixture_loop_scope = "function"
markers = [
@@ -52,7 +66,7 @@ markers = [
"bin/**.py" = ["T201", "T203"]
"scripts/**.py" = ["T201", "T203"]
"tests/**.py" = ["T201", "T203"]
-"examples/**.py" = ["T201", "T203"]
+"examples/**.py" = ["T201", "T203", "E501", "F841"]
[tool.ruff]
line-length = 110
diff --git a/scripts/pull_oas.py b/scripts/pull_oas.py
index aed39ae..7d57e80 100644
--- a/scripts/pull_oas.py
+++ b/scripts/pull_oas.py
@@ -19,7 +19,7 @@
STACKONE_DOCS_BASE = "https://docs.stackone.com"
STACKONE_DOCS_URL = f"{STACKONE_DOCS_BASE}/openapi"
-OAS_DIR = Path("packages/stackone-core/src/stackone_ai/oas")
+OAS_DIR = Path("stackone_ai/oas")
def get_api_specs() -> dict[str, str]:
diff --git a/packages/stackone-ai/stackone_ai/__init__.py b/stackone_ai/__init__.py
similarity index 100%
rename from packages/stackone-ai/stackone_ai/__init__.py
rename to stackone_ai/__init__.py
diff --git a/packages/stackone-ai/stackone_ai/constants.py b/stackone_ai/constants.py
similarity index 100%
rename from packages/stackone-ai/stackone_ai/constants.py
rename to stackone_ai/constants.py
diff --git a/stackone_ai/models.py b/stackone_ai/models.py
new file mode 100644
index 0000000..52bc2ae
--- /dev/null
+++ b/stackone_ai/models.py
@@ -0,0 +1,419 @@
+import base64
+import json
+from collections.abc import Sequence
+from enum import Enum
+from typing import Annotated, Any, TypeAlias, cast
+
+import requests
+from langchain_core.tools import BaseTool
+from pydantic import BaseModel, BeforeValidator, Field, PrivateAttr
+from requests.exceptions import RequestException
+
+# Type aliases for common types
+JsonDict: TypeAlias = dict[str, Any]
+Headers: TypeAlias = dict[str, str]
+
+
+class StackOneError(Exception):
+ """Base exception for StackOne errors"""
+
+ pass
+
+
+class StackOneAPIError(StackOneError):
+ """Raised when the StackOne API returns an error"""
+
+ def __init__(self, message: str, status_code: int, response_body: Any) -> None:
+ super().__init__(message)
+ self.status_code = status_code
+ self.response_body = response_body
+
+
+class ParameterLocation(str, Enum):
+ """Valid locations for parameters in requests"""
+
+ HEADER = "header"
+ QUERY = "query"
+ PATH = "path"
+ BODY = "body"
+ FILE = "file" # For file uploads
+
+
+def validate_method(v: str) -> str:
+ """Validate HTTP method is uppercase and supported"""
+ method = v.upper()
+ if method not in {"GET", "POST", "PUT", "DELETE", "PATCH"}:
+ raise ValueError(f"Unsupported HTTP method: {method}")
+ return method
+
+
+class ExecuteConfig(BaseModel):
+ """Configuration for executing a tool against an API endpoint"""
+
+ headers: Headers = Field(default_factory=dict, description="HTTP headers to include in the request")
+ method: Annotated[str, BeforeValidator(validate_method)] = Field(description="HTTP method to use")
+ url: str = Field(description="API endpoint URL")
+ name: str = Field(description="Tool name")
+ body_type: str | None = Field(default=None, description="Content type for request body")
+ parameter_locations: dict[str, ParameterLocation] = Field(
+ default_factory=dict, description="Maps parameter names to their location in the request"
+ )
+
+
+class ToolParameters(BaseModel):
+ """Schema definition for tool parameters"""
+
+ type: str = Field(description="JSON Schema type")
+ properties: JsonDict = Field(description="JSON Schema properties")
+
+
+class ToolDefinition(BaseModel):
+ """Complete definition of a tool including its schema and execution config"""
+
+ description: str = Field(description="Tool description")
+ parameters: ToolParameters = Field(description="Tool parameter schema")
+ execute: ExecuteConfig = Field(description="Tool execution configuration")
+
+
+class StackOneTool(BaseModel):
+ """Base class for all StackOne tools. Provides functionality for executing API calls
+ and converting to various formats (OpenAI, LangChain)."""
+
+ name: str = Field(description="Tool name")
+ description: str = Field(description="Tool description")
+ parameters: ToolParameters = Field(description="Tool parameters")
+ _execute_config: ExecuteConfig = PrivateAttr()
+ _api_key: str = PrivateAttr()
+ _account_id: str | None = PrivateAttr(default=None)
+
+ def __init__(
+ self,
+ description: str,
+ parameters: ToolParameters,
+ _execute_config: ExecuteConfig,
+ _api_key: str,
+ _account_id: str | None = None,
+ ) -> None:
+ super().__init__(
+ name=_execute_config.name,
+ description=description,
+ parameters=parameters,
+ )
+ self._execute_config = _execute_config
+ self._api_key = _api_key
+ self._account_id = _account_id
+
+ def _prepare_headers(self) -> Headers:
+ """Prepare headers for the API request
+
+ Returns:
+ Headers to use in the request
+ """
+ auth_string = base64.b64encode(f"{self._api_key}:".encode()).decode()
+ headers: Headers = {
+ "Authorization": f"Basic {auth_string}",
+ "User-Agent": "stackone-python/1.0.0",
+ }
+
+ if self._account_id:
+ headers["x-account-id"] = self._account_id
+
+ # Add predefined headers
+ headers.update(self._execute_config.headers)
+ return headers
+
+ def _prepare_request_params(self, kwargs: JsonDict) -> tuple[str, JsonDict, JsonDict]:
+ """Prepare URL and parameters for the API request
+
+ Args:
+ kwargs: Arguments to process
+
+ Returns:
+ Tuple of (url, body_params, query_params)
+ """
+ url = self._execute_config.url
+ body_params: JsonDict = {}
+ query_params: JsonDict = {}
+
+ for key, value in kwargs.items():
+ param_location = self._execute_config.parameter_locations.get(key)
+
+ match param_location:
+ case ParameterLocation.PATH:
+ url = url.replace(f"{{{key}}}", str(value))
+ case ParameterLocation.QUERY:
+ query_params[key] = value
+ case ParameterLocation.BODY | ParameterLocation.FILE:
+ body_params[key] = value
+ case _:
+ # Default behavior
+ if f"{{{key}}}" in url:
+ url = url.replace(f"{{{key}}}", str(value))
+ elif self._execute_config.method in {"GET", "DELETE"}:
+ query_params[key] = value
+ else:
+ body_params[key] = value
+
+ return url, body_params, query_params
+
+ def execute(self, arguments: str | JsonDict | None = None) -> JsonDict:
+ """Execute the tool with the given parameters
+
+ Args:
+ arguments: Tool arguments as string or dict
+
+ Returns:
+ API response as dict
+
+ Raises:
+ StackOneAPIError: If the API request fails
+ ValueError: If the arguments are invalid
+ """
+ try:
+ # Parse arguments
+ if isinstance(arguments, str):
+ kwargs = json.loads(arguments)
+ else:
+ kwargs = arguments or {}
+
+ # Prepare request
+ headers = self._prepare_headers()
+ url, body_params, query_params = self._prepare_request_params(kwargs)
+
+ request_kwargs: dict[str, Any] = {
+ "method": self._execute_config.method,
+ "url": url,
+ "headers": headers,
+ }
+
+ if body_params:
+ body_type = self._execute_config.body_type or "json"
+ if body_type == "json":
+ request_kwargs["json"] = body_params
+ elif body_type == "form":
+ request_kwargs["data"] = body_params
+
+ if query_params:
+ request_kwargs["params"] = query_params
+
+ response = requests.request(**request_kwargs)
+ response.raise_for_status()
+
+ # Ensure we return a dict
+ result = response.json()
+ return cast(JsonDict, result) if isinstance(result, dict) else {"result": result}
+
+ except json.JSONDecodeError as e:
+ raise ValueError(f"Invalid JSON in arguments: {e}") from e
+ except RequestException as e:
+ if hasattr(e, "response") and e.response is not None:
+ raise StackOneAPIError(
+ str(e),
+ e.response.status_code,
+ e.response.json() if e.response.text else None,
+ ) from e
+ raise StackOneError(f"Request failed: {e}") from e
+
+ def to_openai_function(self) -> JsonDict:
+ """Convert this tool to OpenAI's function format
+
+ Returns:
+ Tool definition in OpenAI function format
+ """
+ # Clean properties and handle special types
+ properties = {}
+ required = []
+
+ for name, prop in self.parameters.properties.items():
+ if isinstance(prop, dict):
+ # Only keep standard JSON Schema properties
+ cleaned_prop = {}
+
+ # Copy basic properties
+ if "type" in prop:
+ cleaned_prop["type"] = prop["type"]
+ if "description" in prop:
+ cleaned_prop["description"] = prop["description"]
+ if "enum" in prop:
+ cleaned_prop["enum"] = prop["enum"]
+
+ # Handle array types
+ if cleaned_prop.get("type") == "array" and "items" in prop:
+ if isinstance(prop["items"], dict):
+ cleaned_prop["items"] = {
+ k: v for k, v in prop["items"].items() if k in ("type", "description", "enum")
+ }
+
+ # Handle object types
+ if cleaned_prop.get("type") == "object" and "properties" in prop:
+ cleaned_prop["properties"] = {
+ k: {sk: sv for sk, sv in v.items() if sk in ("type", "description", "enum")}
+ for k, v in prop["properties"].items()
+ }
+
+ # Handle required fields - if not explicitly nullable
+ if not prop.get("nullable", False):
+ required.append(name)
+
+ properties[name] = cleaned_prop
+ else:
+ properties[name] = {"type": "string"}
+ required.append(name)
+
+ # Create the OpenAI function schema
+ parameters = {
+ "type": "object",
+ "properties": properties,
+ }
+
+ # Only include required if there are required fields
+ if required:
+ parameters["required"] = required
+
+ return {
+ "type": "function",
+ "function": {
+ "name": self.name,
+ "description": self.description,
+ "parameters": parameters,
+ },
+ }
+
+ def to_langchain(self) -> BaseTool:
+ """Convert this tool to LangChain format
+
+ Returns:
+ Tool in LangChain format
+ """
+ # Create properly annotated schema for the tool
+ schema_props: dict[str, Any] = {}
+ annotations: dict[str, Any] = {}
+
+ for name, details in self.parameters.properties.items():
+ python_type: type = str # Default to str
+ if isinstance(details, dict):
+ type_str = details.get("type", "string")
+ match type_str:
+ case "number":
+ python_type = float
+ case "integer":
+ python_type = int
+ case "boolean":
+ python_type = bool
+
+ field = Field(description=details.get("description", ""))
+ else:
+ field = Field(description="")
+
+ schema_props[name] = field
+ annotations[name] = python_type
+
+ # Create the schema class with proper annotations
+ schema_class = type(
+ f"{self.name.title()}Args",
+ (BaseModel,),
+ {
+ "__annotations__": annotations,
+ "__module__": __name__,
+ **schema_props,
+ },
+ )
+
+ parent_tool = self
+
+ class StackOneLangChainTool(BaseTool):
+ name: str = parent_tool.name
+ description: str = parent_tool.description
+ args_schema: type[BaseModel] = schema_class
+ func = staticmethod(parent_tool.execute) # Required by CrewAI
+
+ def _run(self, **kwargs: Any) -> Any:
+ return parent_tool.execute(kwargs)
+
+ async def _arun(self, **kwargs: Any) -> Any:
+ return self._run(**kwargs)
+
+ return StackOneLangChainTool()
+
+ def set_account_id(self, account_id: str | None) -> None:
+ """Set the account ID for this tool
+
+ Args:
+ account_id: The account ID to use, or None to clear it
+ """
+ self._account_id = account_id
+
+ def get_account_id(self) -> str | None:
+ """Get the current account ID for this tool
+
+ Returns:
+ Current account ID or None if not set
+ """
+ return self._account_id
+
+
+class Tools:
+ """Container for Tool instances with lookup capabilities"""
+
+ def __init__(self, tools: list[StackOneTool]) -> None:
+ """Initialize Tools container
+
+ Args:
+ tools: List of Tool instances to manage
+ """
+ self.tools = tools
+ self._tool_map = {tool.name: tool for tool in tools}
+
+ def __getitem__(self, index: int) -> StackOneTool:
+ return self.tools[index]
+
+ def __len__(self) -> int:
+ return len(self.tools)
+
+ def get_tool(self, name: str) -> StackOneTool | None:
+ """Get a tool by its name
+
+ Args:
+ name: Name of the tool to retrieve
+
+ Returns:
+ The tool if found, None otherwise
+ """
+ return self._tool_map.get(name)
+
+ def set_account_id(self, account_id: str | None) -> None:
+ """Set the account ID for all tools in this collection
+
+ Args:
+ account_id: The account ID to use, or None to clear it
+ """
+ for tool in self.tools:
+ tool.set_account_id(account_id)
+
+ def get_account_id(self) -> str | None:
+ """Get the current account ID for this collection
+
+ Returns:
+ The first non-None account ID found, or None if none set
+ """
+ for tool in self.tools:
+ account_id = tool.get_account_id()
+ if isinstance(account_id, str):
+ return account_id
+ return None
+
+ def to_openai(self) -> list[JsonDict]:
+ """Convert all tools to OpenAI function format
+
+ Returns:
+ List of tools in OpenAI function format
+ """
+ return [tool.to_openai_function() for tool in self.tools]
+
+ def to_langchain(self) -> Sequence[BaseTool]:
+ """Convert all tools to LangChain format
+
+ Returns:
+ Sequence of tools in LangChain format
+ """
+ return [tool.to_langchain() for tool in self.tools]
diff --git a/packages/stackone-ai/stackone_ai/oas/ats.json b/stackone_ai/oas/ats.json
similarity index 100%
rename from packages/stackone-ai/stackone_ai/oas/ats.json
rename to stackone_ai/oas/ats.json
diff --git a/packages/stackone-ai/stackone_ai/oas/core.json b/stackone_ai/oas/core.json
similarity index 100%
rename from packages/stackone-ai/stackone_ai/oas/core.json
rename to stackone_ai/oas/core.json
diff --git a/packages/stackone-ai/stackone_ai/oas/crm.json b/stackone_ai/oas/crm.json
similarity index 100%
rename from packages/stackone-ai/stackone_ai/oas/crm.json
rename to stackone_ai/oas/crm.json
diff --git a/packages/stackone-ai/stackone_ai/oas/documents.json b/stackone_ai/oas/documents.json
similarity index 100%
rename from packages/stackone-ai/stackone_ai/oas/documents.json
rename to stackone_ai/oas/documents.json
diff --git a/packages/stackone-ai/stackone_ai/oas/hris.json b/stackone_ai/oas/hris.json
similarity index 100%
rename from packages/stackone-ai/stackone_ai/oas/hris.json
rename to stackone_ai/oas/hris.json
diff --git a/packages/stackone-ai/stackone_ai/oas/iam.json b/stackone_ai/oas/iam.json
similarity index 100%
rename from packages/stackone-ai/stackone_ai/oas/iam.json
rename to stackone_ai/oas/iam.json
diff --git a/packages/stackone-ai/stackone_ai/oas/lms.json b/stackone_ai/oas/lms.json
similarity index 100%
rename from packages/stackone-ai/stackone_ai/oas/lms.json
rename to stackone_ai/oas/lms.json
diff --git a/packages/stackone-ai/stackone_ai/oas/marketing.json b/stackone_ai/oas/marketing.json
similarity index 100%
rename from packages/stackone-ai/stackone_ai/oas/marketing.json
rename to stackone_ai/oas/marketing.json
diff --git a/packages/stackone-ai/stackone_ai/specs/loader.py b/stackone_ai/specs/loader.py
similarity index 100%
rename from packages/stackone-ai/stackone_ai/specs/loader.py
rename to stackone_ai/specs/loader.py
diff --git a/packages/stackone-ai/stackone_ai/specs/parser.py b/stackone_ai/specs/parser.py
similarity index 69%
rename from packages/stackone-ai/stackone_ai/specs/parser.py
rename to stackone_ai/specs/parser.py
index 1f72b9b..1ac2ed0 100644
--- a/packages/stackone-ai/stackone_ai/specs/parser.py
+++ b/stackone_ai/specs/parser.py
@@ -14,6 +14,28 @@ def __init__(self, spec_path: Path):
servers = self.spec.get("servers", [{"url": "https://api.stackone.com"}])
self.base_url = servers[0]["url"] if isinstance(servers, list) else "https://api.stackone.com"
+ def _is_file_type(self, schema: dict[str, Any]) -> bool:
+ """Check if a schema represents a file upload."""
+ return schema.get("type") == "string" and schema.get("format") == "binary"
+
+ def _convert_to_file_type(self, schema: dict[str, Any]) -> None:
+ """Convert a binary string schema to a file type."""
+ if self._is_file_type(schema):
+ schema["type"] = "file"
+
+ def _handle_file_properties(self, schema: dict[str, Any]) -> None:
+ """Process schema properties to handle file uploads."""
+ if "properties" not in schema:
+ return
+
+ for prop_schema in schema["properties"].values():
+ # Handle direct file uploads
+ self._convert_to_file_type(prop_schema)
+
+ # Handle array of files
+ if prop_schema.get("type") == "array" and "items" in prop_schema:
+ self._convert_to_file_type(prop_schema["items"])
+
def _resolve_schema_ref(
self, ref: str, visited: set[str] | None = None
) -> dict[str, Any] | list[Any] | str:
@@ -99,6 +121,25 @@ def _resolve_schema(
return resolved
+ def _parse_content_schema(
+ self, content_type: str, content: dict[str, Any]
+ ) -> tuple[dict[str, Any] | None, str | None]:
+ """Parse schema from content object for a specific content type."""
+ if content_type not in content:
+ return None, None
+
+ type_content = content[content_type]
+ if not isinstance(type_content, dict):
+ return None, None
+
+ schema = type_content.get("schema", {})
+ resolved = self._resolve_schema(schema)
+
+ if not isinstance(resolved, dict):
+ return None, None
+
+ return resolved, content_type.split("/")[-1]
+
def _parse_request_body(self, operation: dict) -> tuple[dict[str, Any] | None, str | None]:
"""Parse request body schema and content type from operation"""
request_body = operation.get("requestBody", {})
@@ -107,30 +148,32 @@ def _parse_request_body(self, operation: dict) -> tuple[dict[str, Any] | None, s
content = request_body.get("content", {})
- # Handle application/json
- if "application/json" in content:
- json_content = content["application/json"]
- if isinstance(json_content, dict):
- schema = json_content.get("schema", {})
- resolved = self._resolve_schema(schema)
- # Ensure we only return dict for request body
- if isinstance(resolved, dict):
- return resolved, "json"
- return None, None
-
- # Handle form data
- if "application/x-www-form-urlencoded" in content:
- form_content = content["application/x-www-form-urlencoded"]
- if isinstance(form_content, dict):
- schema = form_content.get("schema", {})
- resolved = self._resolve_schema(schema)
- # Ensure we only return dict for request body
- if isinstance(resolved, dict):
- return resolved, "form"
- return None, None
+ # Try JSON first
+ schema, body_type = self._parse_content_schema("application/json", content)
+ if schema:
+ return schema, body_type
+
+ # Try multipart form-data (file uploads)
+ schema, _ = self._parse_content_schema("multipart/form-data", content)
+ if schema:
+ self._handle_file_properties(schema)
+ return schema, "multipart"
+
+ # Try form-urlencoded
+ schema, body_type = self._parse_content_schema("application/x-www-form-urlencoded", content)
+ if schema:
+ return schema, "form"
return None, None
+ def _get_parameter_location(self, prop_schema: dict[str, Any]) -> str:
+ """Determine the parameter location based on schema type."""
+ if prop_schema.get("type") == "file":
+ return "file"
+ if prop_schema.get("type") == "array" and prop_schema.get("items", {}).get("type") == "file":
+ return "file"
+ return "body"
+
def parse_tools(self) -> dict[str, ToolDefinition]:
"""Parse OpenAPI spec into tool definitions"""
tools = {}
@@ -138,7 +181,6 @@ def parse_tools(self) -> dict[str, ToolDefinition]:
for path, path_item in self.spec.get("paths", {}).items():
for method, operation in path_item.items():
name = operation.get("operationId")
-
if not name:
raise ValueError(f"Operation ID is required for tool parsing: {operation}")
@@ -164,10 +206,9 @@ def parse_tools(self) -> dict[str, ToolDefinition]:
# Add request body properties if present
if request_body_schema and isinstance(request_body_schema, dict):
body_props = request_body_schema.get("properties", {})
- properties.update(body_props)
- # Mark all body parameters
- for prop_name in body_props:
- parameter_locations[prop_name] = "body"
+ for prop_name, prop_schema in body_props.items():
+ properties[prop_name] = prop_schema
+ parameter_locations[prop_name] = self._get_parameter_location(prop_schema)
# Create tool definition
tools[name] = ToolDefinition(
diff --git a/stackone_ai/toolset.py b/stackone_ai/toolset.py
new file mode 100644
index 0000000..0e5cb2d
--- /dev/null
+++ b/stackone_ai/toolset.py
@@ -0,0 +1,156 @@
+import fnmatch
+import os
+import warnings
+from typing import Any
+
+from stackone_ai.constants import OAS_DIR
+from stackone_ai.models import (
+ StackOneTool,
+ Tools,
+)
+from stackone_ai.specs.parser import OpenAPIParser
+
+
+class ToolsetError(Exception):
+ """Base exception for toolset errors"""
+
+ pass
+
+
+class ToolsetConfigError(ToolsetError):
+ """Raised when there is an error in the toolset configuration"""
+
+ pass
+
+
+class ToolsetLoadError(ToolsetError):
+ """Raised when there is an error loading tools"""
+
+ pass
+
+
+class StackOneToolSet:
+ """Main class for accessing StackOne tools"""
+
+ def __init__(
+ self,
+ api_key: str | None = None,
+ account_id: str | None = None,
+ ) -> None:
+ """Initialize StackOne tools with authentication
+
+ Args:
+ api_key: Optional API key. If not provided, will try to get from STACKONE_API_KEY env var
+ account_id: Optional account ID. If not provided, will try to get from STACKONE_ACCOUNT_ID env var
+
+ Raises:
+ ToolsetConfigError: If no API key is provided or found in environment
+ """
+ api_key_value = api_key or os.getenv("STACKONE_API_KEY")
+ if not api_key_value:
+ raise ToolsetConfigError(
+ "API key must be provided either through api_key parameter or "
+ "STACKONE_API_KEY environment variable"
+ )
+ self.api_key: str = api_key_value
+ self.account_id = account_id or os.getenv("STACKONE_ACCOUNT_ID")
+
+ def _parse_parameters(self, parameters: list[dict[str, Any]]) -> dict[str, dict[str, str]]:
+ """Parse OpenAPI parameters into tool properties
+
+ Args:
+ parameters: List of OpenAPI parameter objects
+
+ Returns:
+ Dict of parameter properties with name as key and schema details as value
+ """
+ properties: dict[str, dict[str, str]] = {}
+ for param in parameters:
+ if param["in"] == "path":
+ # Ensure we only include string values in the nested dict
+ param_schema = param["schema"]
+ properties[param["name"]] = {
+ "type": str(param_schema["type"]),
+ "description": str(param.get("description", "")),
+ }
+ return properties
+
+ def _matches_filter(self, tool_name: str, filter_pattern: str | list[str]) -> bool:
+ """Check if a tool name matches the filter pattern
+
+ Args:
+ tool_name: Name of the tool to check
+ filter_pattern: String or list of glob patterns to match against.
+ Patterns starting with ! are treated as negative matches.
+
+ Returns:
+ True if the tool name matches any positive pattern and no negative patterns,
+ False otherwise
+ """
+ patterns = [filter_pattern] if isinstance(filter_pattern, str) else filter_pattern
+
+ # Split into positive and negative patterns
+ positive_patterns = [p for p in patterns if not p.startswith("!")]
+ negative_patterns = [p[1:] for p in patterns if p.startswith("!")]
+
+ # If no positive patterns, treat as match all
+ matches_positive = (
+ any(fnmatch.fnmatch(tool_name, p) for p in positive_patterns) if positive_patterns else True
+ )
+
+ # If any negative pattern matches, exclude the tool
+ matches_negative = any(fnmatch.fnmatch(tool_name, p) for p in negative_patterns)
+
+ return matches_positive and not matches_negative
+
+ def get_tools(
+ self, filter_pattern: str | list[str] | None = None, *, account_id: str | None = None
+ ) -> Tools:
+ """Get tools matching the specified filter pattern
+
+ Args:
+ filter_pattern: Optional glob pattern or list of patterns to filter tools
+ (e.g. "hris_*", ["crm_*", "ats_*"])
+ account_id: Optional account ID override. If not provided, uses the one from initialization
+
+ Returns:
+ Collection of tools matching the filter pattern
+
+ Raises:
+ ToolsetLoadError: If there is an error loading the tools
+ """
+ if filter_pattern is None:
+ warnings.warn(
+ "No filter pattern provided. Loading all tools may exceed context windows in "
+ "AI applications.",
+ UserWarning,
+ stacklevel=2,
+ )
+
+ try:
+ all_tools: list[StackOneTool] = []
+ effective_account_id = account_id or self.account_id
+
+ # Load all available specs
+ for spec_file in OAS_DIR.glob("*.json"):
+ parser = OpenAPIParser(spec_file)
+ tool_definitions = parser.parse_tools()
+
+ # Create tools and filter if pattern is provided
+ for _, tool_def in tool_definitions.items():
+ if filter_pattern is None or self._matches_filter(tool_def.execute.name, filter_pattern):
+ tool = StackOneTool(
+ description=tool_def.description,
+ parameters=tool_def.parameters,
+ _execute_config=tool_def.execute,
+ _api_key=self.api_key,
+ _account_id=effective_account_id,
+ )
+ all_tools.append(tool)
+
+ return Tools(all_tools)
+
+ except Exception as e:
+ if isinstance(e, ToolsetError):
+ raise
+ raise ToolsetLoadError(f"Error loading tools: {e}") from e
diff --git a/packages/stackone-ai/tests/snapshots/test_parser/test_parse_all_oas_specs/ats_tools.json b/tests/snapshots/test_parser/test_parse_all_oas_specs/ats_tools.json
similarity index 100%
rename from packages/stackone-ai/tests/snapshots/test_parser/test_parse_all_oas_specs/ats_tools.json
rename to tests/snapshots/test_parser/test_parse_all_oas_specs/ats_tools.json
diff --git a/packages/stackone-ai/tests/snapshots/test_parser/test_parse_all_oas_specs/core_tools.json b/tests/snapshots/test_parser/test_parse_all_oas_specs/core_tools.json
similarity index 100%
rename from packages/stackone-ai/tests/snapshots/test_parser/test_parse_all_oas_specs/core_tools.json
rename to tests/snapshots/test_parser/test_parse_all_oas_specs/core_tools.json
diff --git a/packages/stackone-ai/tests/snapshots/test_parser/test_parse_all_oas_specs/crm_tools.json b/tests/snapshots/test_parser/test_parse_all_oas_specs/crm_tools.json
similarity index 100%
rename from packages/stackone-ai/tests/snapshots/test_parser/test_parse_all_oas_specs/crm_tools.json
rename to tests/snapshots/test_parser/test_parse_all_oas_specs/crm_tools.json
diff --git a/packages/stackone-ai/tests/snapshots/test_parser/test_parse_all_oas_specs/documents_tools.json b/tests/snapshots/test_parser/test_parse_all_oas_specs/documents_tools.json
similarity index 100%
rename from packages/stackone-ai/tests/snapshots/test_parser/test_parse_all_oas_specs/documents_tools.json
rename to tests/snapshots/test_parser/test_parse_all_oas_specs/documents_tools.json
diff --git a/packages/stackone-ai/tests/snapshots/test_parser/test_parse_all_oas_specs/hris_tools.json b/tests/snapshots/test_parser/test_parse_all_oas_specs/hris_tools.json
similarity index 100%
rename from packages/stackone-ai/tests/snapshots/test_parser/test_parse_all_oas_specs/hris_tools.json
rename to tests/snapshots/test_parser/test_parse_all_oas_specs/hris_tools.json
diff --git a/packages/stackone-ai/tests/snapshots/test_parser/test_parse_all_oas_specs/iam_tools.json b/tests/snapshots/test_parser/test_parse_all_oas_specs/iam_tools.json
similarity index 100%
rename from packages/stackone-ai/tests/snapshots/test_parser/test_parse_all_oas_specs/iam_tools.json
rename to tests/snapshots/test_parser/test_parse_all_oas_specs/iam_tools.json
diff --git a/packages/stackone-ai/tests/snapshots/test_parser/test_parse_all_oas_specs/lms_tools.json b/tests/snapshots/test_parser/test_parse_all_oas_specs/lms_tools.json
similarity index 100%
rename from packages/stackone-ai/tests/snapshots/test_parser/test_parse_all_oas_specs/lms_tools.json
rename to tests/snapshots/test_parser/test_parse_all_oas_specs/lms_tools.json
diff --git a/packages/stackone-ai/tests/snapshots/test_parser/test_parse_all_oas_specs/marketing_tools.json b/tests/snapshots/test_parser/test_parse_all_oas_specs/marketing_tools.json
similarity index 100%
rename from packages/stackone-ai/tests/snapshots/test_parser/test_parse_all_oas_specs/marketing_tools.json
rename to tests/snapshots/test_parser/test_parse_all_oas_specs/marketing_tools.json
diff --git a/packages/stackone-ai/tests/test_tools.py b/tests/test_models.py
similarity index 99%
rename from packages/stackone-ai/tests/test_tools.py
rename to tests/test_models.py
index f2e12f7..834e831 100644
--- a/packages/stackone-ai/tests/test_tools.py
+++ b/tests/test_models.py
@@ -3,13 +3,14 @@
import pytest
from langchain_core.tools import BaseTool as LangChainBaseTool
+
from stackone_ai.models import (
ExecuteConfig,
+ StackOneTool,
ToolDefinition,
ToolParameters,
Tools,
)
-from stackone_ai.tools import StackOneTool
@pytest.fixture
diff --git a/packages/stackone-ai/tests/test_parser.py b/tests/test_parser.py
similarity index 70%
rename from packages/stackone-ai/tests/test_parser.py
rename to tests/test_parser.py
index cbf4961..14290a7 100644
--- a/packages/stackone-ai/tests/test_parser.py
+++ b/tests/test_parser.py
@@ -3,6 +3,7 @@
from typing import Any
import pytest
+
from stackone_ai.specs.parser import OpenAPIParser
@@ -361,7 +362,7 @@ def test_circular_reference_detection(nested_parser: OpenAPIParser) -> None:
@pytest.fixture
def oas_specs() -> list[tuple[str, dict[str, Any]]]:
"""Load all OpenAPI specs from the oas directory"""
- oas_dir = Path("packages/stackone-ai/stackone_ai/oas")
+ oas_dir = Path("stackone_ai/oas")
specs = []
for spec_file in oas_dir.glob("*.json"):
@@ -487,3 +488,214 @@ def test_resolve_schema_with_allof(tmp_path: Path) -> None:
assert "phone" in candidate_schema["properties"] # From CreateCandidate extension
assert candidate_schema["description"] == "Candidate Properties"
assert candidate_schema["nullable"] is True
+
+
+@pytest.fixture
+def temp_spec_file(tmp_path: Path) -> Path:
+ """Create a temporary OpenAPI spec file for testing."""
+
+ def write_spec(spec: dict[str, Any]) -> Path:
+ spec_file = tmp_path / "test_spec.json"
+ with open(spec_file, "w") as f:
+ json.dump(spec, f)
+ return spec_file
+
+ return write_spec
+
+
+def test_parse_file_upload(temp_spec_file: Path) -> None:
+ """Test parsing an OpenAPI spec with file upload endpoints."""
+ spec = {
+ "openapi": "3.0.0",
+ "info": {"title": "Test API", "version": "1.0.0"},
+ "paths": {
+ "/upload": {
+ "post": {
+ "operationId": "uploadFile",
+ "summary": "Upload a file",
+ "requestBody": {
+ "content": {
+ "multipart/form-data": {
+ "schema": {
+ "type": "object",
+ "properties": {
+ "file": {
+ "type": "string",
+ "format": "binary",
+ "description": "The file to upload",
+ },
+ "description": {"type": "string", "description": "File description"},
+ },
+ "required": ["file"],
+ }
+ }
+ }
+ },
+ }
+ }
+ },
+ }
+
+ parser = OpenAPIParser(temp_spec_file(spec))
+ tools = parser.parse_tools()
+
+ assert "uploadFile" in tools
+ tool = tools["uploadFile"]
+
+ # Check file parameter is correctly marked
+ assert "file" in tool.parameters.properties
+ assert tool.parameters.properties["file"]["type"] == "file"
+ assert tool.execute.parameter_locations["file"] == "file"
+
+ # Check non-file parameter
+ assert "description" in tool.parameters.properties
+ assert tool.parameters.properties["description"]["type"] == "string"
+ assert tool.execute.parameter_locations["description"] == "body"
+
+ # Check body type
+ assert tool.execute.body_type == "multipart"
+
+
+def test_parse_multiple_files(temp_spec_file: Path) -> None:
+ """Test parsing an endpoint that accepts multiple files."""
+ spec = {
+ "openapi": "3.0.0",
+ "info": {"title": "Test API", "version": "1.0.0"},
+ "paths": {
+ "/upload-multiple": {
+ "post": {
+ "operationId": "uploadMultipleFiles",
+ "summary": "Upload multiple files",
+ "requestBody": {
+ "content": {
+ "multipart/form-data": {
+ "schema": {
+ "type": "object",
+ "properties": {
+ "files": {
+ "type": "array",
+ "items": {"type": "string", "format": "binary"},
+ "description": "Multiple files to upload",
+ },
+ "metadata": {
+ "type": "object",
+ "properties": {"category": {"type": "string"}},
+ },
+ },
+ }
+ }
+ }
+ },
+ }
+ }
+ },
+ }
+
+ parser = OpenAPIParser(temp_spec_file(spec))
+ tools = parser.parse_tools()
+
+ assert "uploadMultipleFiles" in tools
+ tool = tools["uploadMultipleFiles"]
+
+ # Check array of files
+ assert "files" in tool.parameters.properties
+ assert tool.parameters.properties["files"]["type"] == "array"
+ assert tool.parameters.properties["files"]["items"]["type"] == "file"
+ assert tool.execute.parameter_locations["files"] == "file"
+
+ # Check nested object parameter
+ assert "metadata" in tool.parameters.properties
+ assert tool.parameters.properties["metadata"]["type"] == "object"
+ assert tool.execute.parameter_locations["metadata"] == "body"
+
+
+def test_mixed_parameter_types(temp_spec_file: Path) -> None:
+ """Test parsing an endpoint with mixed parameter types (path, query, file)."""
+ spec = {
+ "openapi": "3.0.0",
+ "info": {"title": "Test API", "version": "1.0.0"},
+ "paths": {
+ "/users/{userId}/files": {
+ "post": {
+ "operationId": "uploadUserFile",
+ "summary": "Upload a user file",
+ "parameters": [
+ {"name": "userId", "in": "path", "required": True, "schema": {"type": "string"}},
+ {"name": "overwrite", "in": "query", "schema": {"type": "boolean"}},
+ ],
+ "requestBody": {
+ "content": {
+ "multipart/form-data": {
+ "schema": {
+ "type": "object",
+ "properties": {"file": {"type": "string", "format": "binary"}},
+ }
+ }
+ }
+ },
+ }
+ }
+ },
+ }
+
+ parser = OpenAPIParser(temp_spec_file(spec))
+ tools = parser.parse_tools()
+
+ assert "uploadUserFile" in tools
+ tool = tools["uploadUserFile"]
+
+ # Check path parameter
+ assert tool.execute.parameter_locations["userId"] == "path"
+ assert tool.parameters.properties["userId"]["type"] == "string"
+
+ # Check query parameter
+ assert tool.execute.parameter_locations["overwrite"] == "query"
+ assert tool.parameters.properties["overwrite"]["type"] == "boolean"
+
+ # Check file parameter
+ assert tool.execute.parameter_locations["file"] == "file"
+ assert tool.parameters.properties["file"]["type"] == "file"
+
+ # Check body type
+ assert tool.execute.body_type == "multipart"
+
+
+def test_form_data_without_files(temp_spec_file: Path) -> None:
+ """Test parsing form data without file uploads."""
+ spec = {
+ "openapi": "3.0.0",
+ "info": {"title": "Test API", "version": "1.0.0"},
+ "paths": {
+ "/submit-form": {
+ "post": {
+ "operationId": "submitForm",
+ "summary": "Submit a form",
+ "requestBody": {
+ "content": {
+ "application/x-www-form-urlencoded": {
+ "schema": {
+ "type": "object",
+ "properties": {"name": {"type": "string"}, "age": {"type": "integer"}},
+ }
+ }
+ }
+ },
+ }
+ }
+ },
+ }
+
+ parser = OpenAPIParser(temp_spec_file(spec))
+ tools = parser.parse_tools()
+
+ assert "submitForm" in tools
+ tool = tools["submitForm"]
+
+ # Check form parameters
+ assert tool.execute.parameter_locations["name"] == "body"
+ assert tool.execute.parameter_locations["age"] == "body"
+ assert tool.parameters.properties["name"]["type"] == "string"
+ assert tool.parameters.properties["age"]["type"] == "integer"
+
+ # Check body type
+ assert tool.execute.body_type == "form"
diff --git a/tests/test_toolset.py b/tests/test_toolset.py
new file mode 100644
index 0000000..8e5a1e3
--- /dev/null
+++ b/tests/test_toolset.py
@@ -0,0 +1,87 @@
+from unittest.mock import MagicMock, patch
+
+from stackone_ai.models import ExecuteConfig, ToolDefinition, ToolParameters
+from stackone_ai.toolset import StackOneToolSet
+
+
+def test_toolset_initialization():
+ """Test StackOneToolSet initialization and tool creation"""
+ mock_spec_content = {
+ "paths": {
+ "/employee/{id}": {
+ "get": {
+ "operationId": "hris_get_employee",
+ "summary": "Get employee details",
+ "parameters": [
+ {
+ "in": "path",
+ "name": "id",
+ "schema": {"type": "string"},
+ "description": "Employee ID",
+ }
+ ],
+ }
+ }
+ }
+ }
+
+ # Create mock tool definition
+ mock_tool_def = ToolDefinition(
+ description="Get employee details",
+ parameters=ToolParameters(
+ type="object",
+ properties={
+ "id": {
+ "type": "string",
+ "description": "Employee ID",
+ }
+ },
+ ),
+ execute=ExecuteConfig(
+ method="GET",
+ url="https://api.stackone.com/employee/{id}",
+ name="hris_get_employee",
+ headers={},
+ parameter_locations={"id": "path"},
+ ),
+ )
+
+ # Mock the OpenAPIParser and file operations
+ with (
+ patch("stackone_ai.toolset.OAS_DIR") as mock_dir,
+ patch("stackone_ai.toolset.OpenAPIParser") as mock_parser_class,
+ ):
+ # Setup mocks
+ mock_path = MagicMock()
+ mock_path.exists.return_value = True
+ mock_dir.__truediv__.return_value = mock_path
+ mock_dir.glob.return_value = [mock_path]
+
+ # Setup parser mock
+ mock_parser = MagicMock()
+ mock_parser.spec = mock_spec_content
+ mock_parser.parse_tools.return_value = {"hris_get_employee": mock_tool_def}
+ mock_parser_class.return_value = mock_parser
+
+ # Create and test toolset
+ toolset = StackOneToolSet(api_key="test_key")
+ tools = toolset.get_tools(filter_pattern="hris_*", account_id="test_account")
+
+ # Verify results
+ assert len(tools) == 1
+ tool = tools.get_tool("hris_get_employee")
+ assert tool is not None
+ assert tool.description == "Get employee details"
+ assert tool._api_key == "test_key"
+ assert tool._account_id == "test_account"
+
+ # Verify the tool parameters
+ assert tool.parameters.properties["id"]["type"] == "string"
+ assert tool.parameters.properties["id"]["description"] == "Employee ID"
+
+
+def test_empty_filter_result():
+ """Test getting tools with a filter pattern that matches nothing"""
+ toolset = StackOneToolSet(api_key="test_key")
+ tools = toolset.get_tools(filter_pattern="unknown_*")
+ assert len(tools) == 0
diff --git a/uv.lock b/uv.lock
index 8079d2a..6f6dfdc 100644
--- a/uv.lock
+++ b/uv.lock
@@ -6,12 +6,6 @@ resolution-markers = [
"python_full_version < '3.12.4'",
]
-[manifest]
-members = [
- "stackone-ai",
- "stackone-ai-python",
-]
-
[[package]]
name = "aiohappyeyeballs"
version = "2.4.6"
@@ -2800,26 +2794,14 @@ wheels = [
[[package]]
name = "stackone-ai"
-version = "0.0.1"
-source = { editable = "packages/stackone-ai" }
+version = "0.0.2"
+source = { editable = "." }
dependencies = [
{ name = "langchain-core" },
{ name = "pydantic" },
{ name = "requests" },
]
-[package.metadata]
-requires-dist = [
- { name = "langchain-core", specifier = ">=0.1.0" },
- { name = "pydantic", specifier = ">=2.10.6" },
- { name = "requests", specifier = ">=2.32.3" },
-]
-
-[[package]]
-name = "stackone-ai-python"
-version = "0.1.0"
-source = { virtual = "." }
-
[package.optional-dependencies]
docs = [
{ name = "mkdocs-material" },
@@ -2850,11 +2832,14 @@ dev = [
[package.metadata]
requires-dist = [
{ name = "crewai", marker = "extra == 'examples'", specifier = ">=0.102.0" },
+ { name = "langchain-core", specifier = ">=0.1.0" },
{ name = "langchain-openai", marker = "extra == 'examples'", specifier = ">=0.3.6" },
{ name = "mkdocs-material", marker = "extra == 'docs'", specifier = ">=9.6.4" },
{ name = "mkdocs-material", marker = "extra == 'pymdown-extensions'", specifier = ">=9.6.4" },
{ name = "openai", marker = "extra == 'examples'", specifier = ">=1.63.2" },
+ { name = "pydantic", specifier = ">=2.10.6" },
{ name = "python-dotenv", marker = "extra == 'examples'", specifier = ">=1.0.1" },
+ { name = "requests", specifier = ">=2.32.3" },
]
provides-extras = ["examples", "docs", "pymdown-extensions"]
@@ -2867,7 +2852,7 @@ dev = [
{ name = "pytest-cov", specifier = ">=6.0.0" },
{ name = "pytest-snapshot", specifier = ">=0.9.0" },
{ name = "ruff", specifier = ">=0.9.6" },
- { name = "stackone-ai", editable = "packages/stackone-ai" },
+ { name = "stackone-ai" },
{ name = "types-requests", specifier = ">=2.31.0.20240311" },
]