Skip to content

Commit

Permalink
update common version
Browse files Browse the repository at this point in the history
  • Loading branch information
allnes committed Sep 24, 2024
1 parent dcc21b6 commit c41e65f
Show file tree
Hide file tree
Showing 8 changed files with 25 additions and 88 deletions.
36 changes: 1 addition & 35 deletions src/plugins/intel_cpu/src/nodes/executors/common/ref_mvn.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -5,44 +5,10 @@
#include "ref_mvn.hpp"
#include "openvino/core/parallel.hpp"

/// Normalizes an input shape of rank 1..5 to the unified 5D form used by the
/// reference MVN kernel.
///
/// For rank 1 and 2, if initAcrossChannels_ is true the shape is adjusted so
/// the data lands in the spatial dimensions and can be fully vectorized under
/// the unified 5D procedure; otherwise there is not enough data in the spatial
/// dimension to process in one kernel.
///
/// @param shape    static dims of the source tensor (rank 1..5)
/// @param mvnAttrs MVN attributes; only initAcrossChannels_ is read
/// @return the shape expanded to exactly 5 dimensions
/// @throws via OPENVINO_THROW for any rank outside [1, 5]
ov::intel_cpu::VectorDims ov::intel_cpu::CommonMVNExecutor::transformTo5DCase(const ov::intel_cpu::VectorDims& shape,
                                                                              const ov::intel_cpu::MVNAttrs& mvnAttrs) {
    const size_t rank = shape.size();
    switch (rank) {
    case 1:  // C
        return mvnAttrs.initAcrossChannels_ ? ov::intel_cpu::VectorDims{1, 1, 1, 1, shape[0]}
                                            : ov::intel_cpu::VectorDims{1, shape[0], 1, 1, 1};
    case 2:  // NC
        return mvnAttrs.initAcrossChannels_ ? ov::intel_cpu::VectorDims{1, shape[0], 1, shape[1], 1}
                                            : ov::intel_cpu::VectorDims{shape[0], shape[1], 1, 1, 1};
    case 3:  // NCW  -> N,C,1,W,1
        return {shape[0], shape[1], 1, shape[2], 1};
    case 4:  // NCHW -> N,C,1,H,W
        return {shape[0], shape[1], 1, shape[2], shape[3]};
    case 5:  // NCDHW, already 5D
        return {shape[0], shape[1], shape[2], shape[3], shape[4]};
    default:
        // Original message read "MVN layer with name doesn't support ..." — the
        // node name was dropped when this was copied out of the MVN node, which
        // left the sentence dangling. The executor has no node name to report.
        OPENVINO_THROW("MVN executor doesn't support planar layout with rank: ", rank);
    }
}

/// Runs the reference MVN kernel: reads the source tensor from ARG_SRC_0 and
/// writes the result into ARG_DST.
///
/// As shown, this span was diff residue containing BOTH the removed
/// transformTo5DCase(...) argument and the added refMVNAttrs.shape5D line,
/// which is not valid C++; this is the intended post-commit form. The 5D shape
/// is taken from the cached attributes (refMVNAttrs.shape5D), which must be
/// populated by the MVN node before execute() is called.
/// NOTE(review): the JIT executor uses ARG_DST_0 while this uses ARG_DST —
/// confirm both keys name the same destination slot.
void ov::intel_cpu::CommonMVNExecutor::execute(const ov::intel_cpu::MemoryArgs &memory) {
    mvn_ref(reinterpret_cast<uint8_t *>(memory.at(ARG_SRC_0)->getData()),
            reinterpret_cast<uint8_t *>(memory.at(ARG_DST)->getData()),
            refMVNAttrs.shape5D);
}

bool ov::intel_cpu::CommonMVNExecutor::update(const ov::intel_cpu::MemoryArgs &memory) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,6 @@ class CommonMVNExecutor : public Executor {
private:
const MVNAttrs& refMVNAttrs;
void mvn_ref(const uint8_t *in_ptr_, uint8_t *out_ptr_, const VectorDims& shape5d);
VectorDims transformTo5DCase(const VectorDims& shape, const MVNAttrs& mvnAttrs);
};

} // namespace intel_cpu
Expand Down
2 changes: 2 additions & 0 deletions src/plugins/intel_cpu/src/nodes/executors/mvn_config.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,8 @@ struct MVNAttrs {
ov::element::Type src_prc;
ov::element::Type dst_prc;
bool srcIsNHWC = false;
std::vector<const void*> postOpsDataPtrs;
VectorDims shape5D = {0, 0, 0, 0, 0};
};

using MVNConfig = executor::Config<MVNAttrs>;
Expand Down
13 changes: 10 additions & 3 deletions src/plugins/intel_cpu/src/nodes/executors/mvn_implementations.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -39,6 +39,7 @@ using namespace TypeMaskAlias;
using namespace executor;

using LayoutConfig = std::vector<LayoutType>;
// Per-backend layout requirements for the MVN executor. Each config lists four
// ncsp (plain/planar) entries — presumably src, dst and fused-op inputs; all
// three backends (jit, acl, ref) currently require plain layouts only.
// TODO(review): confirm what the four slots map to against mvnMappingNotation.
static const LayoutConfig jitMVNLayoutConfig{LayoutType::ncsp, LayoutType::ncsp, LayoutType::ncsp, LayoutType::ncsp};
static const LayoutConfig aclMVNLayoutConfig{LayoutType::ncsp, LayoutType::ncsp, LayoutType::ncsp, LayoutType::ncsp};
static const LayoutConfig refMVNLayoutConfig{LayoutType::ncsp, LayoutType::ncsp, LayoutType::ncsp, LayoutType::ncsp};

Expand All @@ -49,6 +50,12 @@ static const TypeMapping aclMVNTypeMapping {
{{_any, _any}, pt(just<f32>(), just<f32>())}
};

// Precision mapping for the x64 JIT MVN executor:
//  - f32/f16 sources pass through (bypass); use<0>() presumably ties the dst
//    precision to argument 0 (the src) — confirm against the TypeMapping docs.
//  - anything else falls back to f32 on both src and dst.
static const TypeMapping jitMVNTypeMapping {
// {src, dst} pt<src, dst>
{{_f32 | _f16, _any}, pt(bypass(), use<0>())},
{{_any, _any}, pt(just<f32>(), just<f32>())}
};

static const TypeMapping refMVNTypeMapping {
// {src, dst} pt<src, dst>
{{_any, _any}, pt(just<f32>(), just<f32>())}
Expand Down Expand Up @@ -128,7 +135,7 @@ template <>
const std::vector<ExecutorImplementation<MVNAttrs>>& getImplementations() {
static const std::vector<ExecutorImplementation<MVNAttrs>> mvnImplementations {
OV_CPU_INSTANCE_X64(
"mvn_acl",
"mvn_jit_x64",
ExecutorType::jit_x64,
OperationType::MVN,
ShapeTolerance::Agnostic,
Expand All @@ -139,8 +146,8 @@ const std::vector<ExecutorImplementation<MVNAttrs>>& getImplementations() {
// requiresFallback
[](const MVNConfig& config) -> ov::optional<executor::Config<MVNAttrs>> {
return requiresFallbackCommon(config,
aclMVNTypeMapping,
aclMVNLayoutConfig,
jitMVNTypeMapping,
jitMVNLayoutConfig,
mvnMappingNotation);
},
// acceptsShapes
Expand Down
37 changes: 2 additions & 35 deletions src/plugins/intel_cpu/src/nodes/executors/x64/jit_mvn.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -2200,8 +2200,8 @@ void MVNJitExecutor::mvn_blk(const uint8_t* src_data, uint8_t* dst_data, const v
/// Forwards execution to the legacy JIT MVN executor: source from ARG_SRC_0,
/// destination ARG_DST_0, plus the fused post-ops data pointers and the
/// 5D-normalized shape cached in jitMVNAttrs by the MVN node.
///
/// As shown, this span was diff residue containing BOTH the removed arguments
/// (nullptr, transformTo5DCase(...)) and the added ones
/// (postOpsDataPtrs.data(), shape5D), which is not valid C++; this is the
/// intended post-commit form.
/// NOTE(review): the common executor uses ARG_DST while this uses ARG_DST_0 —
/// confirm both keys name the same destination slot.
void JITMVNExecutor::execute(const MemoryArgs &memory) {
    oldMVNJitExecutor->exec(reinterpret_cast<uint8_t *>(memory.at(ARG_SRC_0)->getData()),
                            reinterpret_cast<uint8_t *>(memory.at(ARG_DST_0)->getData()),
                            jitMVNAttrs.postOpsDataPtrs.data(),
                            jitMVNAttrs.shape5D);
}

bool JITMVNExecutor::update(const MemoryArgs &memory) {
Expand All @@ -2213,39 +2213,6 @@ bool JITMVNExecutor::supports(const MVNConfig &config) {
return false;
}

/// Normalizes an input shape of rank 1..5 to the unified 5D form used by the
/// JIT MVN kernel.
///
/// For rank 1 and 2, if initAcrossChannels_ is true the shape is adjusted so
/// the data lands in the spatial dimensions and can be fully vectorized under
/// the unified 5D procedure; otherwise there is not enough data in the spatial
/// dimension to process in one kernel.
///
/// NOTE(review): unlike MVN::transformTo5DCase in the node, this copy does not
/// update execAcrossChannels_ for the rank-1/2 across-channels cases — confirm
/// that is intentional at the executor level.
///
/// @param shape    static dims of the source tensor (rank 1..5)
/// @param mvnAttrs MVN attributes; only initAcrossChannels_ is read
/// @return the shape expanded to exactly 5 dimensions
/// @throws via OPENVINO_THROW for any rank outside [1, 5]
VectorDims JITMVNExecutor::transformTo5DCase(const VectorDims &shape, const MVNAttrs &mvnAttrs) {
    const size_t rank = shape.size();
    switch (rank) {
    case 1:  // C
        return mvnAttrs.initAcrossChannels_ ? VectorDims{1, 1, 1, 1, shape[0]}
                                            : VectorDims{1, shape[0], 1, 1, 1};
    case 2:  // NC
        return mvnAttrs.initAcrossChannels_ ? VectorDims{1, shape[0], 1, shape[1], 1}
                                            : VectorDims{shape[0], shape[1], 1, 1, 1};
    case 3:  // NCW  -> N,C,1,W,1
        return {shape[0], shape[1], 1, shape[2], 1};
    case 4:  // NCHW -> N,C,1,H,W
        return {shape[0], shape[1], 1, shape[2], shape[3]};
    case 5:  // NCDHW, already 5D
        return {shape[0], shape[1], shape[2], shape[3], shape[4]};
    default:
        // Original message read "MVN layer with name doesn't support ..." — the
        // node name was dropped when this was copied out of the MVN node, which
        // left the sentence dangling. The executor has no node name to report.
        OPENVINO_THROW("MVN executor doesn't support planar layout with rank: ", rank);
    }
}


} // namespace intel_cpu
} // namespace ov
1 change: 0 additions & 1 deletion src/plugins/intel_cpu/src/nodes/executors/x64/jit_mvn.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -147,7 +147,6 @@ class JITMVNExecutor : public Executor {
private:
const MVNAttrs& jitMVNAttrs;
std::shared_ptr<old_version::MVNJitExecutor> oldMVNJitExecutor;
VectorDims transformTo5DCase(const VectorDims& shape, const MVNAttrs& mvnAttrs);
};

} // namespace intel_cpu
Expand Down
20 changes: 10 additions & 10 deletions src/plugins/intel_cpu/src/nodes/mvn.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -384,25 +384,25 @@ void MVN::transformTo5DCase(const VectorDims& shape) {
switch (rank) {
case 1 : // C
if (mvnAttrs.initAcrossChannels_) {
shape5D = {1, 1, 1, 1, shape[0]};
mvnAttrs.shape5D = {1, 1, 1, 1, shape[0]};
mvnAttrs.execAcrossChannels_ = false;
break;
} else {
shape5D = {1, shape[0], 1, 1, 1};
mvnAttrs.shape5D = {1, shape[0], 1, 1, 1};
break;
}
case 2 : // NC
if (mvnAttrs.initAcrossChannels_) {
shape5D = {1, shape[0], 1, shape[1], 1};
mvnAttrs.shape5D = {1, shape[0], 1, shape[1], 1};
mvnAttrs.execAcrossChannels_ = false;
break;
} else {
shape5D = {shape[0], shape[1], 1, 1, 1};
mvnAttrs.shape5D = {shape[0], shape[1], 1, 1, 1};
break;
}
case 3 : { shape5D = {shape[0], shape[1], 1, shape[2], 1}; break; }
case 4 : { shape5D = {shape[0], shape[1], 1, shape[2], shape[3]}; break; }
case 5 : { shape5D = {shape[0], shape[1], shape[2], shape[3], shape[4]}; break; }
case 3 : { mvnAttrs.shape5D = {shape[0], shape[1], 1, shape[2], 1}; break; }
case 4 : { mvnAttrs.shape5D = {shape[0], shape[1], 1, shape[2], shape[3]}; break; }
case 5 : { mvnAttrs.shape5D = {shape[0], shape[1], shape[2], shape[3], shape[4]}; break; }
default: {
OPENVINO_THROW("MVN layer with name '",
getName(),
Expand All @@ -414,17 +414,17 @@ void MVN::transformTo5DCase(const VectorDims& shape) {

void MVN::setPostOps(dnnl::primitive_attr &attr, bool initWeights) {
dnnl::post_ops ops;
postOpsDataPtrs.clear();
mvnAttrs.postOpsDataPtrs.clear();
for (auto &node : fusedWith) {
auto* fakeQuantizeNode = dynamic_cast<FakeQuantize *>(node.get());
if (fakeQuantizeNode) {
fakeQuantizeNode->appendPostOps(ops, {}, postOpsDataPtrs);
fakeQuantizeNode->appendPostOps(ops, {}, mvnAttrs.postOpsDataPtrs);
continue;
}

auto* eltwiseNode = dynamic_cast<Eltwise *>(node.get());
if (eltwiseNode) {
eltwiseNode->appendPostOps(ops, shape5D, postOpsDataPtrs);
eltwiseNode->appendPostOps(ops, mvnAttrs.shape5D, mvnAttrs.postOpsDataPtrs);
continue;
}
OPENVINO_THROW("Fusing of ",
Expand Down
3 changes: 0 additions & 3 deletions src/plugins/intel_cpu/src/nodes/mvn.h
Original file line number Diff line number Diff line change
Expand Up @@ -47,15 +47,12 @@ class MVN : public Node {
void transformTo5DCase(const VectorDims& shape);
ExecutorPtr createExecutor();

std::vector<const void*> postOpsDataPtrs;

MVNAttrs mvnAttrs;
PostOps postOps;
MemoryArgs memory;
ExecutorFactoryPtr<MVNAttrs, node::MVN> factory;
ExecutorPtr executor = nullptr;
bool onlyUnaryPostOps = true;
VectorDims shape5D = {0, 0, 0, 0, 0};
bool canUseAclExecutor = false;
};

Expand Down

0 comments on commit c41e65f

Please sign in to comment.