
Commit db2761d

remove commented lines
1 parent d3dbd10 commit db2761d


2 files changed: +0 -52 lines changed


tests/dragon/utils/worker.py

Lines changed: 0 additions & 26 deletions
@@ -96,35 +96,9 @@ def transform_output(
         execute_result: mliw.ExecuteResult,
         result_device: str,
     ) -> mliw.TransformOutputResult:
-        # transformed = [item.clone() for item in execute_result.predictions]
-        # return OutputTransformResult(transformed)
-
-        # transformed = [item.bytes() for item in execute_result.predictions]
-
-        # OutputTransformResult.transformed SHOULD be a list of
-        # capnproto Tensors Or tensor descriptors accompanying bytes
-
         # send the original tensors...
         execute_result.predictions = [t.detach() for t in execute_result.predictions]
         # todo: solve sending all tensor metadata that coincisdes with each prediction
         return mliw.TransformOutputResult(
             execute_result.predictions, [1], "c", "float32"
         )
-        # return OutputTransformResult(transformed)
-
-    # @staticmethod
-    # def serialize_reply(
-    #     request: InferenceRequest, results: OutputTransformResult
-    # ) -> t.Any:
-    #     # results = IntegratedTorchWorker._prepare_outputs(results.outputs)
-    #     # return results
-    #     return None
-    #     # response = MessageHandler.build_response(
-    #     #     status=200, # todo: are we satisfied with 0/1 (success, fail)
-    #     #     # todo: if not detailed messages, this shouldn't be returned.
-    #     #     message="success",
-    #     #     result=results,
-    #     #     custom_attributes=None,
-    #     # )
-    #     # serialized_resp = MessageHandler.serialize_response(response)
-    #     # return serialized_resp

tests/mli/worker.py

Lines changed: 0 additions & 26 deletions
@@ -96,35 +96,9 @@ def transform_output(
         execute_result: mliw.ExecuteResult,
         result_device: str,
     ) -> mliw.TransformOutputResult:
-        # transformed = [item.clone() for item in execute_result.predictions]
-        # return OutputTransformResult(transformed)
-
-        # transformed = [item.bytes() for item in execute_result.predictions]
-
-        # OutputTransformResult.transformed SHOULD be a list of
-        # capnproto Tensors Or tensor descriptors accompanying bytes
-
         # send the original tensors...
         execute_result.predictions = [t.detach() for t in execute_result.predictions]
         # todo: solve sending all tensor metadata that coincisdes with each prediction
         return mliw.TransformOutputResult(
             execute_result.predictions, [1], "c", "float32"
         )
-        # return OutputTransformResult(transformed)
-
-    # @staticmethod
-    # def serialize_reply(
-    #     request: InferenceRequest, results: OutputTransformResult
-    # ) -> t.Any:
-    #     # results = IntegratedTorchWorker._prepare_outputs(results.outputs)
-    #     # return results
-    #     return None
-    #     # response = MessageHandler.build_response(
-    #     #     status=200, # todo: are we satisfied with 0/1 (success, fail)
-    #     #     # todo: if not detailed messages, this shouldn't be returned.
-    #     #     message="success",
-    #     #     result=results,
-    #     #     custom_attributes=None,
-    #     # )
-    #     # serialized_resp = MessageHandler.serialize_response(response)
-    #     # return serialized_resp
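Both files keep an identical transform_output body after this commit: each prediction tensor is detached and the list is wrapped in mliw.TransformOutputResult with hard-coded shape/order/dtype metadata. The sketch below illustrates only that retained behavior in isolation; the ExecuteResult and TransformOutputResult stand-ins and their field names are assumptions for illustration, not the actual mliw types.

# Hedged sketch of the retained transform_output path. The dataclasses are
# hypothetical stand-ins for mliw.ExecuteResult / mliw.TransformOutputResult;
# their field names are assumed, not taken from the real module.
from dataclasses import dataclass
import typing as t

import torch


@dataclass
class ExecuteResult:  # stand-in for mliw.ExecuteResult
    predictions: t.List[torch.Tensor]


@dataclass
class TransformOutputResult:  # stand-in for mliw.TransformOutputResult
    outputs: t.List[torch.Tensor]
    shape: t.List[int]
    order: str
    dtype: str


def transform_output(
    execute_result: ExecuteResult, result_device: str
) -> TransformOutputResult:
    # detach predictions from the autograd graph before sending them on
    execute_result.predictions = [t_.detach() for t_ in execute_result.predictions]
    # shape/order/dtype are placeholder metadata, mirroring the retained test code
    return TransformOutputResult(execute_result.predictions, [1], "c", "float32")


# example: transform a single dummy prediction produced on the CPU
result = transform_output(ExecuteResult([torch.ones(2, 2)]), result_device="cpu")

The todo kept in the diff marks the open question this sketch does not solve: sending per-prediction tensor metadata instead of the placeholder shape/order/dtype values.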

0 commit comments