This repository was archived by the owner on Nov 27, 2024. It is now read-only.

Commit 9e9efde

Update README

1 parent 776aa06

File tree (2 files changed: +39 −62 lines)

- OnnxStack.Core/README.md
- OnnxStack.StableDiffusion/Diffusers/DiffuserBase.cs

OnnxStack.Core/README.md

Lines changed: 38 additions & 59 deletions
@@ -36,14 +36,23 @@ The `appsettings.json` is the easiest option for configuring model sets. Below i
   "AllowedHosts": "*",

   "OnnxStackConfig": {
-    "Name": "Clip Tokenizer",
-    "TokenizerLimit": 77,
-    "ModelConfigurations": [{
-      "Type": "Tokenizer",
-      "DeviceId": 0,
-      "ExecutionProvider": "Cpu",
-      "OnnxModelPath": "D:\\Repositories\\stable-diffusion-v1-5\\cliptokenizer.onnx"
-    }]
+    "OnnxModelSets": [
+      {
+        "Name": "ClipTokenizer",
+        "IsEnabled": true,
+        "DeviceId": 0,
+        "InterOpNumThreads": 0,
+        "IntraOpNumThreads": 0,
+        "ExecutionMode": "ORT_SEQUENTIAL",
+        "ExecutionProvider": "DirectML",
+        "ModelConfigurations": [
+          {
+            "Type": "Tokenizer",
+            "OnnxModelPath": "cliptokenizer.onnx"
+          }
+        ]
+      }
+    ]
   }
 }
 ```
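
The new `OnnxModelSets` layout nests the per-model session settings (device, threading, execution mode and provider) under a named set. For readers wiring this up themselves, here is a minimal sketch of binding the section with `Microsoft.Extensions.Configuration`; the POCO shapes below are illustrative assumptions mirroring the JSON keys above, not the library's actual classes (only `OnnxStackConfig` and its `OnnxModelSets` property are confirmed by this commit):

```csharp
// Minimal sketch, not the library's actual types: binds the "OnnxStackConfig"
// section above to POCOs mirroring the JSON layout. Requires the
// Microsoft.Extensions.Configuration.Json and .Binder packages.
using System;
using System.Collections.Generic;
using Microsoft.Extensions.Configuration;

var configuration = new ConfigurationBuilder()
    .AddJsonFile("appsettings.json")
    .Build();

// Bind the section to the illustrative types declared below.
var settings = configuration.GetSection("OnnxStackConfig").Get<OnnxStackSettings>();
Console.WriteLine($"Loaded {settings.OnnxModelSets.Count} model set(s).");

// Illustrative POCOs (assumed shapes, mirroring the JSON keys above).
public class OnnxStackSettings
{
    public List<OnnxModelSetConfig> OnnxModelSets { get; set; } = new();
}

public class OnnxModelSetConfig
{
    public string Name { get; set; }
    public bool IsEnabled { get; set; }
    public int DeviceId { get; set; }
    public int InterOpNumThreads { get; set; }
    public int IntraOpNumThreads { get; set; }
    public string ExecutionMode { get; set; }
    public string ExecutionProvider { get; set; }
    public List<OnnxModelConfig> ModelConfigurations { get; set; } = new();
}

public class OnnxModelConfig
{
    public string Type { get; set; }
    public string OnnxModelPath { get; set; }
}
```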
@@ -53,66 +62,36 @@ The `appsettings.json` is the easiest option for configuring model sets. Below i
 ### Basic C# Example
 ```csharp

-// From DI
-IOnnxModelService _onnxModelService;
-
-
 // Tokenizer model Example
-var text = "Text To Tokenize";
-var inputTensor = new DenseTensor<string>(new string[] { text }, new int[] { 1 });
-var inputString = new List<NamedOnnxValue>
-{
-    NamedOnnxValue.CreateFromTensor("string_input", inputTensor)
-};
-
-// Create an InferenceSession from the Onnx clip tokenizer.
-// Run session and send the input data in to get inference output.
-using (var tokens = _onnxModelService.RunInference(OnnxModelType.Tokenizer, inputString))
-{
-    var resultTensor = tokens.ToArray();
-}
-
-```
-
-
-
-### Basic C# Example (No DI)
-```csharp
-// Create Configuration
-var onnxStackConfig = new OnnxStackConfig
-{
-    Name = "OnnxStack",
-    TokenizerLimit = 77,
-    ModelConfigurations = new List<OnnxModelSessionConfig>
-    {
-        new OnnxModelSessionConfig
-        {
-            DeviceId = 0,
-            ExecutionProvider = ExecutionProvider.DirectML,
+//----------------------//

-            Type = OnnxModelType.Tokenizer,
-            OnnxModelPath = "clip_tokenizer.onnx",
-        }
-    }
-};
+// From DI
+OnnxStackConfig _onnxStackConfig;
+IOnnxModelService _onnxModelService;

-// Create Service
-var onnxModelService = new OnnxModelService(onnxStackConfig);
+// Get Model
+var model = _onnxStackConfig.OnnxModelSets.First();

+// Get Model Metadata
+var metadata = _onnxModelService.GetModelMetadata(model, OnnxModelType.Tokenizer);

-// Tokenizer model Example
+// Create Input
 var text = "Text To Tokenize";
 var inputTensor = new DenseTensor<string>(new string[] { text }, new int[] { 1 });
-var inputString = new List<NamedOnnxValue>
-{
-    NamedOnnxValue.CreateFromTensor("string_input", inputTensor)
-};

-// Create an InferenceSession from the Onnx clip tokenizer.
-// Run session and send the input data in to get inference output.
-using (var tokens = onnxModelService.RunInference(OnnxModelType.Tokenizer, inputString))
+// Create Inference Parameters container
+using (var inferenceParameters = new OnnxInferenceParameters(metadata))
 {
-    var resultTensor = tokens.ToArray();
+    // Set Inputs and Outputs
+    inferenceParameters.AddInputTensor(inputTensor);
+    inferenceParameters.AddOutputBuffer();
+
+    // Run Inference
+    using (var results = _onnxModelService.RunInference(model, OnnxModelType.Tokenizer, inferenceParameters))
+    {
+        // Extract Result
+        var resultData = results[0].ToDenseTensor();
+    }
 }

 ```
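
Pieced together from the context and added lines above, the updated README example reads as a whole after this commit:

```csharp
// Tokenizer model Example
//----------------------//

// From DI
OnnxStackConfig _onnxStackConfig;
IOnnxModelService _onnxModelService;

// Get Model
var model = _onnxStackConfig.OnnxModelSets.First();

// Get Model Metadata
var metadata = _onnxModelService.GetModelMetadata(model, OnnxModelType.Tokenizer);

// Create Input
var text = "Text To Tokenize";
var inputTensor = new DenseTensor<string>(new string[] { text }, new int[] { 1 });

// Create Inference Parameters container
using (var inferenceParameters = new OnnxInferenceParameters(metadata))
{
    // Set Inputs and Outputs
    inferenceParameters.AddInputTensor(inputTensor);
    inferenceParameters.AddOutputBuffer();

    // Run Inference
    using (var results = _onnxModelService.RunInference(model, OnnxModelType.Tokenizer, inferenceParameters))
    {
        // Extract Result
        var resultData = results[0].ToDenseTensor();
    }
}
```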

OnnxStack.StableDiffusion/Diffusers/DiffuserBase.cs

Lines changed: 1 addition & 3 deletions
@@ -232,10 +232,8 @@ protected virtual async Task<DenseTensor<float>> DecodeLatentsAsync(IModelOption


         /// <summary>
-        /// Creates the timestep OrtValue based on its NodeMetadata type.
+        /// Creates the timestep tensor.
         /// </summary>
-        /// <param name="nodeMetadata">The node metadata.</param>
-        /// <param name="timestepInputName">Name of the timestep input.</param>
         /// <param name="timestep">The timestep.</param>
         /// <returns></returns>
         protected static DenseTensor<float> CreateTimestepTensor(int timestep)
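
The body of `CreateTimestepTensor` is not part of this hunk; a minimal sketch consistent with the new signature, assuming the timestep is wrapped in a single-element tensor, could be:

```csharp
using Microsoft.ML.OnnxRuntime.Tensors;

// Sketch only; the actual implementation is not shown in this diff.
// Assumes the timestep becomes a DenseTensor<float> of shape [1].
protected static DenseTensor<float> CreateTimestepTensor(int timestep)
{
    return new DenseTensor<float>(new float[] { timestep }, new int[] { 1 });
}
```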
