@@ -36,14 +36,23 @@ The `appsettings.json` is the easiest option for configuring model sets. Below i
   "AllowedHosts": "*",
 
   "OnnxStackConfig": {
-    "Name": "Clip Tokenizer",
-    "TokenizerLimit": 77,
-    "ModelConfigurations": [{
-      "Type": "Tokenizer",
-      "DeviceId": 0,
-      "ExecutionProvider": "Cpu",
-      "OnnxModelPath": "D:\\Repositories\\stable-diffusion-v1-5\\cliptokenizer.onnx"
-    }]
+    "OnnxModelSets": [
+      {
+        "Name": "ClipTokenizer",
+        "IsEnabled": true,
+        "DeviceId": 0,
+        "InterOpNumThreads": 0,
+        "IntraOpNumThreads": 0,
+        "ExecutionMode": "ORT_SEQUENTIAL",
+        "ExecutionProvider": "DirectML",
+        "ModelConfigurations": [
+          {
+            "Type": "Tokenizer",
+            "OnnxModelPath": "cliptokenizer.onnx"
+          }
+        ]
+      }
+    ]
   }
 }
 ```
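The reworked `OnnxStackConfig` section is a plain options object, so it binds with the stock .NET configuration APIs. Below is a minimal sketch, not part of this commit: the `Microsoft.Extensions.Configuration` calls are standard .NET, the `OnnxStackConfig`/`OnnxModelSets`/`Name`/`IsEnabled`/`ExecutionProvider` names come from the JSON above, and the file name and namespace imports are assumptions.

```csharp
// Sketch only: assumes the Microsoft.Extensions.Configuration.Json and
// Microsoft.Extensions.Configuration.Binder packages, plus a using for
// whatever namespace OnnxStackConfig lives in.
using System;
using Microsoft.Extensions.Configuration;

var configuration = new ConfigurationBuilder()
    .AddJsonFile("appsettings.json", optional: false)
    .Build();

// Bind the "OnnxStackConfig" section shown in the diff above.
var onnxStackConfig = configuration
    .GetSection("OnnxStackConfig")
    .Get<OnnxStackConfig>();

// Each model set now carries its own device, threading, and provider settings.
foreach (var modelSet in onnxStackConfig.OnnxModelSets)
    Console.WriteLine($"{modelSet.Name}: {modelSet.ExecutionProvider} (enabled: {modelSet.IsEnabled})");
```

`ExecutionMode`, `InterOpNumThreads`, and `IntraOpNumThreads` match the ONNX Runtime `SessionOptions` properties of the same names (a thread count of `0` means the runtime picks a default), so each model set presumably maps straight onto one session's options.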
@@ -53,66 +62,36 @@ The `appsettings.json` is the easiest option for configuring model sets. Below i
 
 ### Basic C# Example
 ```csharp
-// From DI
-IOnnxModelService _onnxModelService;
-
-
 // Tokenizer model Example
-var text = "Text To Tokenize";
-var inputTensor = new DenseTensor<string>(new string[] { text }, new int[] { 1 });
-var inputString = new List<NamedOnnxValue>
-{
-    NamedOnnxValue.CreateFromTensor("string_input", inputTensor)
-};
-
-// Create an InferenceSession from the Onnx clip tokenizer.
-// Run session and send the input data in to get inference output.
-using (var tokens = _onnxModelService.RunInference(OnnxModelType.Tokenizer, inputString))
-{
-    var resultTensor = tokens.ToArray();
-}
-
-```
-
-
-
-### Basic C# Example (No DI)
-```csharp
-// Create Configuration
-var onnxStackConfig = new OnnxStackConfig
-{
-    Name = "OnnxStack",
-    TokenizerLimit = 77,
-    ModelConfigurations = new List<OnnxModelSessionConfig>
-    {
-        new OnnxModelSessionConfig
-        {
-            DeviceId = 0,
-            ExecutionProvider = ExecutionProvider.DirectML,
+// ----------------------//
 
-            Type = OnnxModelType.Tokenizer,
-            OnnxModelPath = "clip_tokenizer.onnx",
-        }
-    }
-};
+// From DI
+OnnxStackConfig _onnxStackConfig;
+IOnnxModelService _onnxModelService;
 
-// Create Service
-var onnxModelService = new OnnxModelService(onnxStackConfig);
+// Get Model
+var model = _onnxStackConfig.OnnxModelSets.First();
 
+// Get Model Metadata
+var metadata = _onnxModelService.GetModelMetadata(model, OnnxModelType.Tokenizer);
 
-// Tokenizer model Example
+// Create Input
 var text = "Text To Tokenize";
 var inputTensor = new DenseTensor<string>(new string[] { text }, new int[] { 1 });
-var inputString = new List<NamedOnnxValue>
-{
-    NamedOnnxValue.CreateFromTensor("string_input", inputTensor)
-};
 
-// Create an InferenceSession from the Onnx clip tokenizer.
-// Run session and send the input data in to get inference output.
-using (var tokens = onnxModelService.RunInference(OnnxModelType.Tokenizer, inputString))
+// Create Inference Parameters container
+using (var inferenceParameters = new OnnxInferenceParameters(metadata))
 {
-    var resultTensor = tokens.ToArray();
+    // Set Inputs and Outputs
+    inferenceParameters.AddInputTensor(inputTensor);
+    inferenceParameters.AddOutputBuffer();
+
+    // Run Inference
+    using (var results = _onnxModelService.RunInference(model, OnnxModelType.Tokenizer, inferenceParameters))
+    {
+        // Extract Result
+        var resultData = results[0].ToDenseTensor();
+    }
 }
 
 ```
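Putting the new API together as one self-contained class may make the flow easier to follow. This is a sketch rather than part of the commit: the class shape, constructor injection, and `using` directives are assumptions, while every OnnxStack call (`GetModelMetadata`, `OnnxInferenceParameters`, `AddInputTensor`, `AddOutputBuffer`, `RunInference`, `ToDenseTensor`) appears verbatim in the diff above.

```csharp
// Sketch only: namespaces and class shape are assumed; the OnnxStack
// calls are taken from the example above.
using System.Linq;
using Microsoft.ML.OnnxRuntime.Tensors;

public class TokenizerExample
{
    private readonly OnnxStackConfig _onnxStackConfig;
    private readonly IOnnxModelService _onnxModelService;

    // Both dependencies are assumed to be registered with the DI container.
    public TokenizerExample(OnnxStackConfig onnxStackConfig, IOnnxModelService onnxModelService)
    {
        _onnxStackConfig = onnxStackConfig;
        _onnxModelService = onnxModelService;
    }

    public void Tokenize(string text)
    {
        // Resolve the model set and the tokenizer's metadata.
        var model = _onnxStackConfig.OnnxModelSets.First();
        var metadata = _onnxModelService.GetModelMetadata(model, OnnxModelType.Tokenizer);

        // A string tensor of shape [1] holding the prompt.
        var inputTensor = new DenseTensor<string>(new[] { text }, new[] { 1 });

        using (var inferenceParameters = new OnnxInferenceParameters(metadata))
        {
            inferenceParameters.AddInputTensor(inputTensor);
            inferenceParameters.AddOutputBuffer();

            using (var results = _onnxModelService.RunInference(model, OnnxModelType.Tokenizer, inferenceParameters))
            {
                // The first output holds the token ids produced by the CLIP tokenizer.
                var resultData = results[0].ToDenseTensor();
            }
        }
    }
}
```

Compared with the removed snippet, the parameters container is built from the model's metadata rather than a hand-assembled `NamedOnnxValue` list, so the caller no longer hard-codes the `string_input` node name, and `AddOutputBuffer()` appears to let the service allocate the output on the caller's behalf.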