
Commit b37105e

egdaniel authored and Skia Commit-Bot committed
Remove support for manual memory management in GrVkMemory.
These code paths are no longer used now that the GrBackendTexture code uses the normal Ganesh code paths.

Change-Id: Idecf6876d96c7be587919f969d4736374d82c199
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/289623
Reviewed-by: Jim Van Verth <[email protected]>
Commit-Queue: Greg Daniel <[email protected]>
1 parent fe49ce2 commit b37105e
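
With the manual fallback removed, every path in the diff below funnels through GrVkMemoryAllocator. For orientation, here is a minimal sketch of the allocator surface this file now relies on, reconstructed only from the calls visible in this commit; the real interface has more members, and the GrVkBackendMemory typedef and the class name are assumptions:

#include <vulkan/vulkan_core.h>
#include <cstdint>

// Assumption: an opaque handle the allocator hands back per allocation.
using GrVkBackendMemory = intptr_t;

// Sketch only; named ...Sketch to avoid claiming this is Skia's real class.
class GrVkMemoryAllocatorSketch {
public:
    virtual ~GrVkMemoryAllocatorSketch() = default;

    // Return the allocation's memory to the allocator.
    virtual void freeMemory(const GrVkBackendMemory&) = 0;

    // Map/unmap a host-visible allocation; mapMemory presumably returns
    // nullptr on failure, matching the old manual path's behavior.
    virtual void* mapMemory(const GrVkBackendMemory&) = 0;
    virtual void unmapMemory(const GrVkBackendMemory&) = 0;

    // Flush/invalidate a sub-range of a non-coherent mapped allocation.
    virtual void flushMappedMemory(const GrVkBackendMemory&, VkDeviceSize offset,
                                   VkDeviceSize size) = 0;
    virtual void invalidateMappedMemory(const GrVkBackendMemory&, VkDeviceSize offset,
                                        VkDeviceSize size) = 0;
};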

File tree

1 file changed: +18 -53 lines changed

src/gpu/vk/GrVkMemory.cpp

Lines changed: 18 additions & 53 deletions
@@ -76,12 +76,9 @@ bool GrVkMemory::AllocAndBindBufferMemory(GrVkGpu* gpu,
 
 void GrVkMemory::FreeBufferMemory(const GrVkGpu* gpu, GrVkBuffer::Type type,
                                   const GrVkAlloc& alloc) {
-    if (alloc.fBackendMemory) {
-        GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
-        allocator->freeMemory(alloc.fBackendMemory);
-    } else {
-        GR_VK_CALL(gpu->vkInterface(), FreeMemory(gpu->device(), alloc.fMemory, nullptr));
-    }
+    SkASSERT(alloc.fBackendMemory);
+    GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
+    allocator->freeMemory(alloc.fBackendMemory);
 }
 
 const VkDeviceSize kMaxSmallImageSize = 256 * 1024;
@@ -128,38 +125,22 @@ bool GrVkMemory::AllocAndBindImageMemory(GrVkGpu* gpu,
 
 void GrVkMemory::FreeImageMemory(const GrVkGpu* gpu, bool linearTiling,
                                  const GrVkAlloc& alloc) {
-    if (alloc.fBackendMemory) {
-        GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
-        allocator->freeMemory(alloc.fBackendMemory);
-    } else {
-        GR_VK_CALL(gpu->vkInterface(), FreeMemory(gpu->device(), alloc.fMemory, nullptr));
-    }
+    SkASSERT(alloc.fBackendMemory);
+    GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
+    allocator->freeMemory(alloc.fBackendMemory);
 }
 
 void* GrVkMemory::MapAlloc(GrVkGpu* gpu, const GrVkAlloc& alloc) {
     SkASSERT(GrVkAlloc::kMappable_Flag & alloc.fFlags);
-    if (alloc.fBackendMemory) {
-        GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
-        return allocator->mapMemory(alloc.fBackendMemory);
-    }
-
-    void* mapPtr;
-    VkResult err;
-    GR_VK_CALL_RESULT(gpu, err, MapMemory(gpu->device(), alloc.fMemory, alloc.fOffset, alloc.fSize,
-                                          0, &mapPtr));
-    if (err) {
-        mapPtr = nullptr;
-    }
-    return mapPtr;
+    SkASSERT(alloc.fBackendMemory);
+    GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
+    return allocator->mapMemory(alloc.fBackendMemory);
 }
 
 void GrVkMemory::UnmapAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc) {
-    if (alloc.fBackendMemory) {
-        GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
-        allocator->unmapMemory(alloc.fBackendMemory);
-    } else {
-        GR_VK_CALL(gpu->vkInterface(), UnmapMemory(gpu->device(), alloc.fMemory));
-    }
+    SkASSERT(alloc.fBackendMemory);
+    GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
+    allocator->unmapMemory(alloc.fBackendMemory);
 }
 
 void GrVkMemory::GetNonCoherentMappedMemoryRange(const GrVkAlloc& alloc, VkDeviceSize offset,
@@ -190,17 +171,9 @@ void GrVkMemory::FlushMappedAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc, Vk
     if (alloc.fFlags & GrVkAlloc::kNoncoherent_Flag) {
         SkASSERT(offset == 0);
         SkASSERT(size <= alloc.fSize);
-        if (alloc.fBackendMemory) {
-            GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
-            allocator->flushMappedMemory(alloc.fBackendMemory, offset, size);
-        } else {
-            VkDeviceSize alignment = gpu->physicalDeviceProperties().limits.nonCoherentAtomSize;
-            VkMappedMemoryRange mappedMemoryRange;
-            GrVkMemory::GetNonCoherentMappedMemoryRange(alloc, offset, size, alignment,
-                                                        &mappedMemoryRange);
-            GR_VK_CALL(gpu->vkInterface(), FlushMappedMemoryRanges(gpu->device(), 1,
-                                                                   &mappedMemoryRange));
-        }
+        SkASSERT(alloc.fBackendMemory);
+        GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
+        allocator->flushMappedMemory(alloc.fBackendMemory, offset, size);
     }
 }
 
@@ -209,17 +182,9 @@ void GrVkMemory::InvalidateMappedAlloc(const GrVkGpu* gpu, const GrVkAlloc& allo
     if (alloc.fFlags & GrVkAlloc::kNoncoherent_Flag) {
         SkASSERT(offset == 0);
         SkASSERT(size <= alloc.fSize);
-        if (alloc.fBackendMemory) {
-            GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
-            allocator->invalidateMappedMemory(alloc.fBackendMemory, offset, size);
-        } else {
-            VkDeviceSize alignment = gpu->physicalDeviceProperties().limits.nonCoherentAtomSize;
-            VkMappedMemoryRange mappedMemoryRange;
-            GrVkMemory::GetNonCoherentMappedMemoryRange(alloc, offset, size, alignment,
-                                                        &mappedMemoryRange);
-            GR_VK_CALL(gpu->vkInterface(), InvalidateMappedMemoryRanges(gpu->device(), 1,
-                                                                        &mappedMemoryRange));
-        }
+        SkASSERT(alloc.fBackendMemory);
+        GrVkMemoryAllocator* allocator = gpu->memoryAllocator();
+        allocator->invalidateMappedMemory(alloc.fBackendMemory, offset, size);
     }
 }
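
GetNonCoherentMappedMemoryRange itself survives the commit (it is still a context line above): something still has to produce atom-size-aligned ranges for non-coherent memory, whether Ganesh or the allocator does the flushing. A hedged sketch of what such a helper has to compute, assuming nonCoherentAtomSize is a power of two; the fields fMemory, fOffset, and fSize are taken from the diff, the rest is illustrative:

#include <vulkan/vulkan_core.h>

// Illustrative stand-in for the GrVkAlloc fields used in the diff.
struct GrVkAllocSketch {
    VkDeviceMemory fMemory;  // backing VkDeviceMemory object
    VkDeviceSize fOffset;    // offset of this suballocation within fMemory
    VkDeviceSize fSize;      // size of this suballocation
};

// Vulkan requires flush/invalidate ranges on non-coherent memory to be
// aligned to VkPhysicalDeviceLimits::nonCoherentAtomSize: round the range
// start down and its size up to that alignment (assumed a power of two).
static void getNonCoherentRangeSketch(const GrVkAllocSketch& alloc, VkDeviceSize offset,
                                      VkDeviceSize size, VkDeviceSize alignment,
                                      VkMappedMemoryRange* range) {
    // Translate the caller's offset (relative to the suballocation) into an
    // offset within the whole VkDeviceMemory object.
    offset += alloc.fOffset;
    VkDeviceSize diff = offset & (alignment - 1);
    offset -= diff;                                           // round start down
    size = (size + diff + alignment - 1) & ~(alignment - 1);  // round size up

    *range = {};
    range->sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
    range->memory = alloc.fMemory;
    range->offset = offset;
    range->size = size;
}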

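After this change, a GrVkAlloc without a valid fBackendMemory handle is a programming error (hence the new SkASSERTs), so a typical host-write path runs the whole map/flush/unmap cycle through the allocator. A hypothetical caller sketch; uploadToAlloc and its parameters are placeholders, and only the GrVkMemory entry points come from the diff:

#include <cstring>  // memcpy

// Hypothetical helper: copy `size` bytes into a mappable allocation.
// Assumes alloc came from GrVkMemoryAllocator, so alloc.fBackendMemory is
// valid, as the new SkASSERTs in this commit require.
static bool uploadToAlloc(GrVkGpu* gpu, const GrVkAlloc& alloc,
                          const void* src, VkDeviceSize size) {
    void* mapPtr = GrVkMemory::MapAlloc(gpu, alloc);
    if (!mapPtr) {
        return false;
    }
    memcpy(mapPtr, src, size);
    // FlushMappedAlloc only does work when the allocation is non-coherent;
    // the diff shows it asserting offset == 0 and size <= alloc.fSize.
    GrVkMemory::FlushMappedAlloc(gpu, alloc, /*offset=*/0, size);
    GrVkMemory::UnmapAlloc(gpu, alloc);
    return true;
}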