@@ -180,10 +180,12 @@ class TorchGenerator : public ov::genai::CppStdGenerator {
     }
 
     float next() override {
+        py::gil_scoped_acquire acquire;
         return m_torch.attr("randn")(1, "generator"_a=m_torch_generator, "dtype"_a=m_float32).attr("item")().cast<float>();
     }
 
     ov::Tensor randn_tensor(const ov::Shape& shape) override {
+        py::gil_scoped_acquire acquire;
         py::object torch_tensor = m_torch.attr("randn")(to_py_list(shape), "generator"_a=m_torch_generator, "dtype"_a=m_float32);
         py::object numpy_tensor = torch_tensor.attr("numpy")();
         py::array numpy_array = py::cast<py::array>(numpy_tensor);
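The two py::gil_scoped_acquire lines added above follow the standard pybind11 rule: C++ code that calls back into Python must hold the GIL, and after this change the pipeline bindings (see the hunks further down) release it before entering generate(). A minimal sketch of the pattern, assuming an illustrative sample_noise helper that is not part of this repo:

// Minimal sketch, not from this PR: a C++ callback into torch must
// re-acquire the GIL, because its caller may be running under
// py::gil_scoped_release on a worker thread.
#include <pybind11/pybind11.h>
namespace py = pybind11;
using namespace py::literals;

float sample_noise(py::object& torch, py::object& generator) {
    py::gil_scoped_acquire acquire;  // safe even if this thread already holds the GIL
    return torch.attr("randn")(1, "generator"_a = generator)
        .attr("item")().cast<float>();
}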
@@ -201,6 +203,32 @@ class TorchGenerator : public ov::genai::CppStdGenerator {
     TorchTensorAllocator(size_t total_size, void* mutable_data, py::object torch_tensor) :
         m_total_size(total_size), m_mutable_data(mutable_data), m_torch_tensor(torch_tensor) { }
 
+    ~TorchTensorAllocator() {
+        if (m_torch_tensor && Py_IsInitialized()) {
+            py::gil_scoped_acquire acquire;
+            m_torch_tensor = py::object();
+        }
+    }
+
+    TorchTensorAllocator(const TorchTensorAllocator& other)
+        : m_total_size(other.m_total_size), m_mutable_data(other.m_mutable_data) {
+        py::gil_scoped_acquire acquire;
+        m_torch_tensor = other.m_torch_tensor;
+    }
+
+    TorchTensorAllocator& operator=(const TorchTensorAllocator& other) {
+        if (this != &other) {
+            m_total_size = other.m_total_size;
+            m_mutable_data = other.m_mutable_data;
+            py::gil_scoped_acquire acquire;
+            m_torch_tensor = other.m_torch_tensor;
+        }
+        return *this;
+    }
+
+    TorchTensorAllocator(TorchTensorAllocator&&) = default;
+    TorchTensorAllocator& operator=(TorchTensorAllocator&&) = default;
+
     void* allocate(size_t bytes, size_t) const {
         if (m_total_size == bytes) {
             return m_mutable_data;
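The new special members exist because ov::Tensor may copy or destroy its allocator on a thread that does not hold the GIL: copying a py::object increments the CPython refcount and releasing it decrements the refcount, and both are only legal with the GIL held. The Py_IsInitialized() guard in the destructor skips the acquire during interpreter shutdown, when taking the GIL is no longer possible. A stripped-down sketch of the same holder pattern, using the hypothetical name PyObjectHolder:

// Minimal sketch of a copyable C++ holder for a py::object whose copies
// and destruction may happen off the Python thread.
#include <pybind11/pybind11.h>
#include <utility>
namespace py = pybind11;

class PyObjectHolder {
    py::object m_obj;
public:
    explicit PyObjectHolder(py::object obj) : m_obj(std::move(obj)) {}

    PyObjectHolder(const PyObjectHolder& other) {
        py::gil_scoped_acquire acquire;  // copy implies Py_INCREF
        m_obj = other.m_obj;
    }

    ~PyObjectHolder() {
        if (m_obj && Py_IsInitialized()) {
            py::gil_scoped_acquire acquire;  // release implies Py_DECREF
            m_obj = py::object();            // leaves a null object behind
        }
    }

    // Moves steal the pointer instead of adding a reference,
    // mirroring the defaulted moves in the hunk above.
    PyObjectHolder(PyObjectHolder&&) = default;
    PyObjectHolder& operator=(PyObjectHolder&&) = default;
};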
@@ -221,6 +249,7 @@ class TorchGenerator : public ov::genai::CppStdGenerator {
     }
 
     void seed(size_t new_seed) override {
+        py::gil_scoped_acquire acquire;
         create_torch_generator(new_seed);
     }
 };
@@ -448,12 +477,7 @@ void init_image_generation_pipelines(py::module_& m) {
         ) -> py::typing::Union<ov::Tensor> {
             ov::AnyMap params = pyutils::kwargs_to_any_map(kwargs);
             ov::Tensor res;
-            if (params_have_torch_generator(params)) {
-                // TorchGenerator stores python object which causes segfault after gil_scoped_release
-                // so if it was passed, we don't release GIL
-                res = pipe.generate(prompt, params);
-            }
-            else {
+            {
                 py::gil_scoped_release rel;
                 res = pipe.generate(prompt, params);
             }
@@ -565,12 +589,7 @@ void init_image_generation_pipelines(py::module_& m) {
         ) -> py::typing::Union<ov::Tensor> {
             ov::AnyMap params = pyutils::kwargs_to_any_map(kwargs);
             ov::Tensor res;
-            if (params_have_torch_generator(params)) {
-                // TorchGenerator stores python object which causes segfault after gil_scoped_release
-                // so if it was passed, we don't release GIL
-                res = pipe.generate(prompt, image, params);
-            }
-            else {
+            {
                 py::gil_scoped_release rel;
                 res = pipe.generate(prompt, image, params);
             }
@@ -676,12 +695,7 @@ void init_image_generation_pipelines(py::module_& m) {
         ) -> py::typing::Union<ov::Tensor> {
             ov::AnyMap params = pyutils::kwargs_to_any_map(kwargs);
             ov::Tensor res;
-            if (params_have_torch_generator(params)) {
-                // TorchGenerator stores python object which causes segfault after gil_scoped_release
-                // so if it was passed, we don't release GIL
-                res = pipe.generate(prompt, image, mask_image, params);
-            }
-            else {
+            {
                 py::gil_scoped_release rel;
                 res = pipe.generate(prompt, image, mask_image, params);
             }
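With the generator callbacks and the tensor allocator now managing the GIL themselves, all three generate() bindings (text-to-image, image-to-image, inpainting) can drop the old params_have_torch_generator special case and release the GIL unconditionally around the blocking call. A generic sketch of that pairing, with an illustrative do_long_work callback:

// Minimal sketch: release the GIL around long-running C++ work; any
// Python callback invoked from inside it re-acquires the GIL on its own.
#include <pybind11/pybind11.h>
#include <functional>
namespace py = pybind11;

double call_blocking(const std::function<double()>& do_long_work) {
    double result;
    {
        py::gil_scoped_release rel;  // other Python threads can run meanwhile
        result = do_long_work();     // may internally take py::gil_scoped_acquire
    }
    return result;
}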