Mirror of https://github.com/pytorch/pytorch.git
wipe cache with writes (#12279)
Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/12279

For some reason, if we don't write to the wipe buffer, it doesn't actually evict everything from the caches on x86. We also need to wipe the cache after initializing the input blobs.

Reviewed By: Maratyszcza

Differential Revision: D10161211

fbshipit-source-id: c34414dd8b83947805010d7d57e4134d56de1430
committed by Facebook Github Bot
parent 6b9afc894b
commit 557015fd93
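The diff below is the actual change. As a quick orientation, here is a minimal, self-contained sketch of the same idea: stride through a buffer larger than the last-level cache, reading each slot and writing a value back, and return an accumulated hash so the loop has an observable result. The names (`wipe_cache_sketch`, `wipe_buffer`), the 64 MB size, and the `memset` initialization are illustrative assumptions, not the Caffe2 values.

#include <cstdint>
#include <cstdlib>
#include <cstring>

// Minimal sketch of a write-based cache wipe (illustrative names/sizes).
static uint32_t* wipe_buffer = nullptr;
static const size_t wipe_size = 64 * 1024 * 1024;  // assumed to exceed the LLC

uint32_t wipe_cache_sketch() {
  if (wipe_buffer == nullptr) {
    wipe_buffer = static_cast<uint32_t*>(std::malloc(wipe_size));
    if (wipe_buffer == nullptr) {
      return 0;  // allocation failed; nothing to wipe
    }
    std::memset(wipe_buffer, 0, wipe_size);  // touch every page once
  }
  uint32_t hash = 0;
  // Stride through the buffer, reading and then writing each slot.
  // Per the commit summary, a read-only pass did not reliably evict
  // everything from the caches on x86; the write is what matters.
  for (size_t i = 0; i * sizeof(uint32_t) < wipe_size; i += 8) {
    hash ^= wipe_buffer[i];
    wipe_buffer[i] = hash;
  }
  return hash;  // returned so the loop is not optimized away
}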
@@ -260,6 +260,9 @@ void runNetwork(
   for (int i = 0; i < iter; ++i) {
     caffe2::ObserverConfig::initSampleRate(1, 1, 1, 0, warmup);
     fillInputBlob(workspace, tensor_protos_map, i);
+    if (wipe_cache) {
+      caffe2::wipe_cache();
+    }
     CAFFE_ENFORCE(net->Run(), "Main run ", i, " has failed.");
     if (wipe_cache) {
       caffe2::wipe_cache();
@@ -267,9 +270,6 @@ void runNetwork(
     if (run_individual) {
       caffe2::ObserverConfig::initSampleRate(1, 1, 1, 1, warmup);
       CAFFE_ENFORCE(net->Run(), "Main run ", i, " with operator has failed.");
-      if (wipe_cache) {
-        caffe2::wipe_cache();
-      }
     }
   }
 }
@@ -73,13 +73,14 @@ uint32_t wipe_cache() {
       break;
     }
 #endif
-    LOG(INFO) << "Allocating cache wipe buffer of size" << wipe_size;
+    LOG(INFO) << "Allocating cache wipe buffer of size " << wipe_size;
     wipe_buffer = static_cast<uint32_t*>(malloc(wipe_size));
     CAFFE_ENFORCE(wipe_buffer != nullptr);
   }
   uint32_t hash = 0;
   for (uint32_t i = 0; i * sizeof(uint32_t) < wipe_size; i += 8) {
     hash ^= wipe_buffer[i];
+    wipe_buffer[i] = hash;
   }
   /* Make sure compiler doesn't optimize the loop away */
   return hash;
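The wipe loop above reads, mixes, and writes back every strided element, and the accumulated XOR hash is returned so the traversal has an observable result (see the comment about the compiler optimizing the loop away). A hedged usage sketch follows; the caller name and the volatile sink are illustrative, not taken from the benchmark code.

#include <cstdint>

namespace caffe2 {
uint32_t wipe_cache();  // defined elsewhere in Caffe2 (see the diff above)
}

// Illustrative caller: sinking the hash into a volatile keeps the wipe
// observable even under aggressive whole-program optimization.
volatile uint32_t wipe_sink = 0;

void timed_run_with_cold_cache() {
  wipe_sink = caffe2::wipe_cache();  // evict warm data before the timed run
  // ... run and time the network here ...
}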