diff --git a/docs/lite/docs/source_zh_cn/reference/operator_list_lite.md b/docs/lite/docs/source_zh_cn/reference/operator_list_lite.md
index 24883cfa414cd367d7a5d15d2e8c653f45ab6067..a276d3a0d202a75af34ec38a8e2dd0a1b9adab5f 100644
--- a/docs/lite/docs/source_zh_cn/reference/operator_list_lite.md
+++ b/docs/lite/docs/source_zh_cn/reference/operator_list_lite.md
@@ -1,368 +1,204 @@
-# Lite算子支持
+# MindSpore Lite支持的硬件后端列表
[![查看源文件](https://mindspore-website.obs.cn-north-4.myhuaweicloud.com/website-images/master/resource/_static/logo_source.svg)](https://gitee.com/mindspore/docs/blob/master/docs/lite/docs/source_zh_cn/reference/operator_list_lite.md)
-MindSpore Lite支持不同硬件后端的算子列表:
-
-| 操作名<br/> | CPU<br/>FP16 | CPU<br/>FP32 | CPU<br/>Int32 | CPU<br/>Int8 | CPU<br/>UInt8 | CPU<br/>Bool | Mali/Adreno GPU<br/>FP16 | Mali/Adreno GPU<br/>FP32 | Mali/Adreno GPU<br/>Int32 | Mali/Adreno GPU<br/>Int8 | 麒麟NPU<br/>FP16 | 英伟达GPU<br/>FP16 | 昇腾<br/>FP16 |
-| ----------------------------------- | :----------: | :----------: | ------------- | :----------: | :-----------: | ------------ | :----------------------: | :----------------------: | ------------------------- | ------------------------ | :--------------------: | :----------------: | :----------------------: |
-| Abs | ✅ | ✅ | ✅ | ✅ | ✅ | | ✅ | ✅ | | | ✅ | ✅ | ✅ |
-| AbsGrad | | ✅ | | | | | | | | | | | |
-| Activation | ✅ | ✅ | ✅ | ✅ | ✅ | | ✅ | ✅ | | | ✅ | ✅ | ✅ |
-| ActivationGrad | ✅ | ✅ | | | | | | | | | | | |
-| Adam | | ✅ | | | | | | | | | | | |
-| AddFusion | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | | ✅ | ✅ | ✅ | ✅ |
-| AdderFusion | | ✅ | | | | | | | | | | | |
-| AddGrad | | ✅ | | | | | | | | | | | |
-| AddN | ✅ | ✅ | | | | | | | | | | | |
-| Affine | | ✅ | | | | | | | | | | | ✅ |
-| All | | ✅ | | | | | | | | | | ✅ | |
-| AllGather | | ✅ | | | | | | | | | | ✅ | |
-| ApplyMomentum | | ✅ | | | | | | | | | | | ✅ |
-| Assert | ✅ | ✅ | | | | ✅ | | | | | | | |
-| Assign | | ✅ | | | | | | | | | | | ✅ |
-| ArgmaxFusion | ✅ | ✅ | | ✅ | ✅ | | ✅ | ✅ | | | ✅ | ✅ | ✅ |
-| ArgminFusion | ✅ | ✅ | | ✅ | ✅ | | ✅ | ✅ | | | | | ✅ |
-| AvgPoolFusion | ✅ | ✅ | | ✅ | ✅ | | ✅ | ✅ | | | ✅ | ✅ | ✅ |
-| AvgPoolGrad | ✅ | ✅ | | | | | | | | | | | |
-| BatchNorm | ✅ | ✅ | | ✅ | ✅ | | ✅ | ✅ | | | | | ✅ |
-| BatchNormGrad | ✅ | ✅ | | | | | | | | | | | |
-| BatchToSpace | | ✅ | | ✅ | ✅ | | ✅ | ✅ | | | | | |
-| BatchToSpaceND | ✅ | ✅ | | ✅ | ✅ | | ✅ | ✅ | | | | | |
-| BiasAdd | ✅ | ✅ | | ✅ | ✅ | | ✅ | ✅ | | | | ✅ | ✅ |
-| BiasAddGrad | ✅ | ✅ | | | | | | | | | | | |
-| BinaryCrossEntropy | | ✅ | | | | | | | | | | | ✅ |
-| BinaryCrossEntropyGrad | | ✅ | | | | | | | | | | | |
-| BroadcastTo | ✅ | ✅ | ✅ | | | ✅ | | | | | | | |
-| Call | ✅ | ✅ | ✅ | | | ✅ | | | | | | | ✅ |
-| Cast | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | | | ✅ | ✅ | ✅ |
-| Ceil | ✅ | ✅ | | ✅ | ✅ | | ✅ | ✅ | | | ✅ | | ✅ |
-| Clip | | ✅ | ✅ | | | | | | | | | | ✅ |
-| Concat | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | | ✅ | ✅ | ✅ |
-| ConstantOfShape | ✅ | ✅ | ✅ | | | | | | | | | | |
-| Conv2DFusion | ✅ | ✅ | | ✅ | ✅ | | ✅ | ✅ | | | ✅ | ✅ | ✅ |
-| Conv2DBackpropFilterFusion | ✅ | ✅ | | | | | | | | | | | |
-| Conv2DBackpropInputFusion | ✅ | ✅ | | | | | | | | | | | |
-| Conv2dTransposeFusion | ✅ | ✅ | | ✅ | ✅ | | ✅ | ✅ | | | ✅ | ✅ | ✅ |
-| Cos | ✅ | ✅ | | ✅ | ✅ | | ✅ | ✅ | | | ✅ | | ✅ |
-| Crop | ✅ | ✅ | ✅ | ✅ | ✅ | | | | | | | | |
-| CropAndResize | | ✅ | | | | | | | | | ✅ | | |
-| CumSum | | ✅ | ✅ | | | | | | | | | | ✅ |
-| CustomExtractFeatures | | ✅ | | | | | | | | | | | |
-| CustomNormalize | | ✅ | | | | | | | | | | | |
-| CustomPredict | | ✅ | ✅ | | | | | | | | | | |
-| DEConv2DGradFilter | | ✅ | | | | | | | | | | | |
-| DepthToSpace | ✅ | ✅ | | ✅ | ✅ | | ✅ | ✅ | | | | | |
-| DetectionPostProcess | | ✅ | | ✅ | ✅ | | | | | | | | |
-| DivFusion | ✅ | ✅ | ✅ | ✅ | ✅ | | ✅ | ✅ | | | ✅ | ✅ | ✅ |
-| DivGrad | | ✅ | | | | | | | | | | | |
-| Dropout | ✅ | ✅ | | | | | | | | | | | ✅ |
-| DropoutGrad | ✅ | ✅ | | | | | | | | | | | |
-| DynamicQuant | | ✅ | | | | | | | | | | | |
-| Eltwise | ✅ | ✅ | | ✅ | ✅ | | ✅ | ✅ | | | ✅ | ✅ | ✅ |
-| Elu | ✅ | ✅ | | | | | | | | | | | ✅ |
-| Equal | ✅ | ✅ | ✅ | ✅ | ✅ | | ✅ | ✅ | | | ✅ | ✅ | ✅ |
-| EmbeddingLookupFusion | | ✅ | | | | | | | | | | | |
-| Erf | ✅ | ✅ | | | | | | | | | | | ✅ |
-| ExpFusion | ✅ | ✅ | | | | | ✅ | ✅ | | | | | ✅ |
-| ExpandDims | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | | ✅ | ✅ | ✅ |
-| Fill | ✅ | ✅ | ✅ | | | ✅ | ✅ | ✅ | | | | | ✅ |
-| Flatten | ✅ | ✅ | ✅ | | | | | | | | | ✅ | ✅ |
-| FlattenGrad | ✅ | ✅ | | | | | | | | | | | |
-| Floor | ✅ | ✅ | | ✅ | ✅ | | ✅ | ✅ | | | ✅ | | ✅ |
-| FloorDiv | ✅ | ✅ | ✅ | | | | ✅ | ✅ | | | ✅ | | |
-| FloorMod | ✅ | ✅ | ✅ | | | | ✅ | ✅ | | | ✅ | | |
-| FullConnection | ✅ | ✅ | | ✅ | ✅ | | ✅ | ✅ | | | ✅ | ✅ | ✅ |
-| FusedBatchNorm | ✅ | ✅ | | ✅ | ✅ | | | | | | ✅ | | ✅ |
-| GatherNd | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | | | | | ✅ |
-| Gather | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | | ✅ | ✅ | ✅ |
-| GatherD | ✅ | ✅ | ✅ | | | ✅ | | | | | | | ✅ |
-| GLU | | ✅ | | | | | | | | | | | |
-| Greater | ✅ | ✅ | ✅ | ✅ | ✅ | | ✅ | ✅ | | | ✅ | | ✅ |
-| GreaterEqual | ✅ | ✅ | ✅ | ✅ | ✅ | | ✅ | ✅ | | | ✅ | | ✅ |
-| GroupNormFusion | | ✅ | | | | | | | | | | | |
-| GRU | ✅ | ✅ | | | | | | | | | | | |
-| HashtableLookup | | ✅ | ✅ | | | | | | | | | | |
-| InstanceNorm | ✅ | ✅ | | | | | | | | | ✅ | | ✅ |
-| InvertPermutation | ✅ | ✅ | ✅ | | | | | | | | | | |
-| IsFinite | | ✅ | | | | | | | | | | | ✅ |
-| L2NormalizeFusion | | ✅ | | ✅ | ✅ | | | | | | | | |
-| LayerNormFusion | ✅ | ✅ | | ✅ | | | ✅ | ✅ | | | | | ✅ |
-| LayerNormGrad | ✅ | ✅ | | | | | | | | | | | |
-| LeakyReLU | ✅ | ✅ | | ✅ | ✅ | | ✅ | ✅ | | | ✅ | | ✅ |
-| Less | ✅ | ✅ | ✅ | ✅ | ✅ | | ✅ | ✅ | | | ✅ | | ✅ |
-| LessEqual | ✅ | ✅ | ✅ | ✅ | ✅ | | ✅ | ✅ | | | ✅ | | ✅ |
-| LRN | | ✅ | | | | | | | | | | | ✅ |
-| Log | ✅ | ✅ | | ✅ | ✅ | | ✅ | ✅ | | | ✅ | | ✅ |
-| Log1p | | ✅ | | | | | | | | | | | ✅ |
-| LogGrad | ✅ | ✅ | | | | | | | | | | | |
-| LogicalAnd | ✅ | ✅ | ✅ | | | ✅ | ✅ | ✅ | | | ✅ | | |
-| LogicalNot | ✅ | ✅ | | ✅ | ✅ | ✅ | ✅ | ✅ | | | ✅ | | |
-| LogicalOr | ✅ | ✅ | | | | ✅ | ✅ | ✅ | | | ✅ | | |
-| LogSoftmax | ✅ | ✅ | | | | | | | | | | | ✅ |
-| LshProjection | | ✅ | | | | | | | | | | | |
-| LSTM | ✅ | ✅ | | | | | | | | | | | |
-| LSTMGrad | | ✅ | | | | | | | | | | | |
-| LSTMGradData | | ✅ | | | | | | | | | | | |
-| LSTMGradWeight | | ✅ | | | | | | | | | | | |
-| MatMulFusion | ✅ | ✅ | | ✅ | ✅ | | ✅ | ✅ | | | ✅ | ✅ | ✅ |
-| Maximum | ✅ | ✅ | ✅ | | | | ✅ | ✅ | | | ✅ | | ✅ |
-| MaximumGrad | ✅ | ✅ | | | | | | | | | | | |
-| MaxPoolFusion | ✅ | ✅ | | ✅ | ✅ | | ✅ | ✅ | | | ✅ | ✅ | ✅ |
-| MaxPoolGrad | ✅ | ✅ | | | | | | | | | | | |
-| Merge | ✅ | ✅ | | | | | | | | | | | |
-| Minimum | ✅ | ✅ | ✅ | | | | ✅ | ✅ | | | ✅ | | ✅ |
-| MinimumGrad | ✅ | ✅ | | | | | | | | | | | |
-| Mod | | ✅ | ✅ | | | | | | | | | | ✅ |
-| MulFusion | ✅ | ✅ | ✅ | ✅ | ✅ | | ✅ | ✅ | | | ✅ | ✅ | ✅ |
-| MulGrad | | ✅ | | | | | | | | | | | |
-| Neg | ✅ | ✅ | ✅ | | | | ✅ | ✅ | | | ✅ | | ✅ |
-| NegGrad | ✅ | ✅ | | | | | | | | | | | |
-| NLLLoss | | ✅ | | | | | | | | | | | ✅ |
-| NLLLossGrad | | ✅ | | | | | | | | | | | |
-| NotEqual | ✅ | ✅ | ✅ | ✅ | ✅ | | ✅ | ✅ | | | ✅ | | |
-| NonMaxSupppression | | ✅ | | | | | | | | | | | ✅ |
-| NonZero | | | | | | ✅ | | | | | | | ✅ |
-| OneHot | ✅ | ✅ | ✅ | | | | ✅ | ✅ | ✅ | | | | |
-| OnesLike | ✅ | ✅ | ✅ | | | | | | | | | ✅ | ✅ |
-| PadFusion | ✅ | ✅ | | ✅ | ✅ | | ✅ | ✅ | | | ✅ | ✅ | ✅ |
-| PartialFusion | ✅ | ✅ | ✅ | | | ✅ | | | | | | | |
-| PowFusion | ✅ | ✅ | | ✅ | ✅ | | ✅ | ✅ | | | | ✅ | ✅ |
-| PowerGrad | | ✅ | | | | | | | | | | | |
-| PriorBox | | ✅ | | ✅ | ✅ | | | | | | | | ✅ |
-| PReLUFusion | ✅ | ✅ | | | | | ✅ | ✅ | | | | | ✅ |
-| QuantDTypeCast | ✅ | ✅ | | ✅ | ✅ | | | | | | | | |
-| RaggedRange | ✅ | ✅ | ✅ | | | | | | | | | | |
-| RandomNormal | ✅ | ✅ | | | | | | | | | | | |
-| RandomStandardNormal | ✅ | ✅ | | | | | | | | | | | |
-| Range | ✅ | ✅ | ✅ | | | | | | | | | | ✅ |
-| Rank | ✅ | ✅ | | | | | | | | | | | |
-| RealDiv | ✅ | ✅ | | | | | | | | | | | ✅ |
-| Reciprocal | ✅ | ✅ | | ✅ | | | | | | | ✅ | | ✅ |
-| ReduceFusion | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | | | ✅ | ✅ | ✅ |
-| ReduceScatter | | ✅ | | | | | | | | | | ✅ | |
-| Reshape | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | | ✅ | ✅ | ✅ |
-| Resize | ✅ | ✅ | | ✅ | ✅ | | ✅ | ✅ | | | ✅ | ✅ | |
-| ResizeGrad | ✅ | ✅ | | | | | | | | | | | |
-| ReverseV2 | | ✅ | ✅ | | | | | | | | | | |
-| ReverseSequence | | ✅ | | | | | | | | | | | ✅ |
-| ROIPooling | | ✅ | | | | | | | | | | | ✅ |
-| Round | ✅ | ✅ | | ✅ | ✅ | | ✅ | ✅ | | | ✅ | | ✅ |
-| Rsqrt | ✅ | ✅ | | ✅ | ✅ | | ✅ | ✅ | | | ✅ | | |
-| RsqrtGrad | | ✅ | | | | | | | | | | | ✅ |
-| Select | | ✅ | | | | ✅ | | | | | | | |
-| Selu | | | | | | | | | | | | | |
-| ScaleFusion | ✅ | ✅ | | ✅ | ✅ | | ✅ | ✅ | | | ✅ | ✅ | ✅ |
-| ScatterNd | ✅ | ✅ | ✅ | | | | | | | | | | ✅ |
-| ScatterNdUpdate | ✅ | ✅ | ✅ | | | | | | | | | | |
-| SGD | | ✅ | | | | | | | | | | | ✅ |
-| Shape | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | | | | | ✅ |
-| SigmoidCroosEntropyWithLogits | | ✅ | | | | | | | | | | | ✅ |
-| SigmoidCroosEntropyWithLogitsGrad | | ✅ | | | | | | | | | | | ✅ |
-| Sin | ✅ | ✅ | | ✅ | ✅ | | ✅ | ✅ | | | ✅ | | ✅ |
-| Size | ✅ | ✅ | ✅ | | | | | | | | | | ✅ |
-| SliceFusion | ✅ | ✅ | ✅ | ✅ | ✅ | | ✅ | ✅ | | | ✅ | | ✅ |
-| SkipGram | | ✅ | | | | | | | | | | | |
-| SmoothL1Loss | | ✅ | | | | | | | | | | | ✅ |
-| SmoothL1LossGrad | | ✅ | | | | | | | | | | | ✅ |
-| Softmax | ✅ | ✅ | | ✅ | ✅ | | ✅ | ✅ | | | ✅ | ✅ | ✅ |
-| SoftmaxGrad | | ✅ | | | | | | | | | | | |
-| Softplus | ✅ | ✅ | | | | | | | | | | | ✅ |
-| SpaceToBatch | ✅ | ✅ | | ✅ | ✅ | | ✅ | ✅ | | | | | |
-| SpaceToBatchND | ✅ | ✅ | | ✅ | ✅ | | ✅ | ✅ | | | | | |
-| SpaceToDepth | ✅ | ✅ | | | | | ✅ | ✅ | | | | | ✅ |
-| SparseToDense | ✅ | ✅ | ✅ | | | | ✅ | ✅ | ✅ | | | | |
-| SparseSoftmaxCrossEntropyWithLogits | | ✅ | | | | | | | | | | | ✅ |
-| Splice | ✅ | ✅ | | | | | | | | | | | |
-| Split | ✅ | ✅ | ✅ | ✅ | ✅ | | ✅ | ✅ | | | ✅ | | ✅ |
-| SplitWithOverlap | ✅ | ✅ | | | | | | | | | | | |
-| Sqrt | ✅ | ✅ | | ✅ | ✅ | | ✅ | ✅ | | | ✅ | ✅ | ✅ |
-| SqrtGrad | | ✅ | | | | | | | | | | | ✅ |
-| Square | ✅ | ✅ | | ✅ | ✅ | | ✅ | ✅ | | | ✅ | | ✅ |
-| SquaredDifference | ✅ | ✅ | | | | | ✅ | ✅ | | | | | |
-| Squeeze | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | | | ✅ | |
-| StridedSlice | ✅ | ✅ | ✅ | ✅ | ✅ | | ✅ | ✅ | | | ✅ | ✅ | ✅ |
-| StridedSliceGrad | ✅ | ✅ | | | | | | | | | | | |
-| Stack | ✅ | ✅ | ✅ | | | | ✅ | ✅ | | | | | ✅ |
-| SubFusion | ✅ | ✅ | ✅ | ✅ | ✅ | | ✅ | ✅ | | | ✅ | ✅ | ✅ |
-| SubGrad | | ✅ | | | | | | | | | | | |
-| Switch | ✅ | ✅ | ✅ | | | ✅ | | | | | | | |
-| SwitchLayer | ✅ | ✅ | ✅ | | | ✅ | | | | | | | |
-| TensorListFromTensor | ✅ | ✅ | ✅ | | | | | | | | | | |
-| TensorListGetItem | ✅ | ✅ | ✅ | | | | | | | | | | |
-| TensorListReserve | ✅ | ✅ | ✅ | | | | | | | | | | |
-| TensorListSetItem | ✅ | ✅ | ✅ | | | | | | | | | | |
-| TensorListStack | ✅ | ✅ | ✅ | | | | | | | | | | |
-| TensorScatterAdd | | ✅ | ✅ | | | | | | | | | | |
-| TileFusion | ✅ | ✅ | ✅ | | | ✅ | | | | | ✅ | | ✅ |
-| TopKFusion | ✅ | ✅ | ✅ | ✅ | ✅ | | | | | | | | ✅ |
-| Transpose | ✅ | ✅ | ✅ | ✅ | | ✅ | ✅ | ✅ | | | ✅ | ✅ | ✅ |
-| UniformReal | | ✅ | ✅ | | | | | | | | | | |
-| Unique | ✅ | ✅ | ✅ | | | | | | | | | | |
-| UnsortedSegmentSum | ✅ | ✅ | ✅ | | | | | | | | | | |
-| Unsqueeze | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | | ✅ | ✅ | |
-| Unstack | ✅ | ✅ | ✅ | | | | | | | | | | |
-| Where | ✅ | ✅ | ✅ | | | ✅ | | | | | | | |
-| ZerosLike | ✅ | ✅ | ✅ | | | | | | | | | | |
-
-MindSpore Lite转换工具支持第三方框架的算子列表:
-
-| 操作名<br/> | 支持的TensorFlow Lite算子 | 支持的Caffe算子 | 支持的Onnx算子 | 支持的TensorFlow算子 |
-| ------------------------------------ | ------------------------------------------------------------ | ------------------------------- | ------------------------------------------------------------ | ------------------------------------------------------------ |
-| Abs | Abs | | Abs | Abs |
-| Activation | Activation, ReLU, ReLU6, PReLU, LeakyReLU, Tanh, HardSwish, Logistic | ReLU, ReLU6, Sigmoid, TanH, Elu | Relu, LeakyRelu, PRelu, Elu, Tanh, Sigmoid, HardSigmoid, Softplus,Gelu | Activation, Elu, Relu, Relu6, Sigmoid, Tanh, Selu, LeakyRelu, Softplus |
-| Adam | Adam | | | Adam |
-| AddFusion | Add | | Add, Int8Add | Add, AddV2 |
-| AdderFusion | | | adder_f | |
-| AddN | AddN | | | |
-| All | All | | | All |
-| ApplyMomentum | ApplyMomentum | | | ApplyMomentum |
-| Assert | | | | Assert |
-| Assign | Assign | | | Assign |
-| ArgmaxFusion | Argmax | ArgMax | ArgMax | ArgMax |
-| ArgminFusion | Argmin | | ArgMin | ArgMin |
-| AvgPoolFusion | MeanPooling | Pooling | AveragePool, GlobalAveragePool, Int8AveragePool | AvgPool |
-| BatchNorm | | BatchNorm | BatchNormalization | |
-| BatchToSpace | BatchToSpace | | | BatchToSpace |
-| BatchToSpaceND | BatchToSpaceND | | | BatchToSpaceND |
-| BiasAdd | | | BiasAdd | BiasAdd |
-| BinaryCrossEntropy | BinaryCrossEntropy | | | BinaryCrossEntropy |
-| BroadcastTo | BroadcastTo | | Expand | BroadcastTo |
-| Cast | Cast, QUANTIZE, DEQUANTIZE | | Cast | Cast |
-| Ceil | Ceil | | Ceil | Ceil |
-| Clip | Clip | | Clip | Clip |
-| Concat | Concat | Concat | Concat | ConcatV2 |
-| ConstantOfShape | | | ConstantOfShape | |
-| Conv2DFusion | Conv2D | Convolution | Conv, Int8Conv, ConvRelu, Int8ConvRelu | Conv2D |
-| Conv2dTransposeFusion | DeConv2D | Deconvolution | ConvTranspose | Conv2DBackpropInput |
-| Cos | Cos | | Cos | Cos |
-| Crop | | Crop | | |
-| CropAndResize | | | | CropAndResize |
-| CumSum | | | CumSum | Cumsum |
-| CustomExtractFeatures | ExtractFeatures | | | |
-| CustomNormalize | Normalize | | | |
-| CustomPredict | Predict | | | |
-| DepthToSpace | DepthToSpace | | DepthToSpace | DepthToSpace |
-| DetectionPostProcess | Custom | | | |
-| DivFusion | Div, RealDiv | | Div | Div, RealDiv |
-| Dropout | Dropout | | Dropout | Dropout |
-| DynamicQuant | | | DynamicQuantizeLinear | |
-| Eltwise | | Eltwise | Sum, Max[3] | |
-| Elu | | ELU | Elu, NonMaxSuppression | NonMaxSuppressionV3 |
-| Equal | Equal | | Equal | Equal |
-| Erf | Erf | | Erf | Erf |
-| ExpFusion | Exp | Exp | Exp | Exp |
-| ExpandDims | ExpandDims | | | ExpandDims |
-| Fill | Fill | | | Fill |
-| Flatten | | Flatten | | |
-| Floor | flOOR | | Floor | Floor |
-| FloorDiv | FloorDiv | | | FloorDiv |
-| FloorMod | FloorMod | | | FloorMod |
-| FullConnection | FullyConnected | InnerProduct | | |
-| FusedBatchNorm | FusedBatchNorm | | | FusedBatchNorm, FusedBatchNormV3 |
-| GatherNd | GatherND | | GatherND | GatherNd |
-| Gather | Gather | | Gather | GatherV2 |
-| Greater | Greater | | Greater | Greater |
-| GreaterEqual | GreaterEqual | | GreaterOrEqual | GreaterEqual |
-| HashtableLookup | HashtableLookup | | | |
-| InstanceNorm | InstanceNorm | | InstanceNormalization | |
-| InvertPermutation | | | | InvertPermutation |
-| IsFinite | IsFinite | | | IsFinite |
-| LeakyReLU | LeakyRelu | | LeakyRelu | LeakyRelu |
-| Less | Less | | Less | Less |
-| LessEqual | LessEqual | | | LessEqual |
-| LRN | LocalResponseNorm | | Lrn, LRN | |
-| Log | Log | | Log | Log |
-| Log1p | Log1p | | | Log1p |
-| LogicalAnd | LogicalAnd | | And | LogicalAnd |
-| LogicalNot | LogicalNot | | Not | LogicalNot |
-| LogicalOr | LogicalOr | | Or | LogicalOr |
-| LogSoftmax | LogSoftmax | | LogSoftmax | |
-| LshProjection | LshProjection | | | |
-| LSTM | | | LSTM | |
-| MatMulFusion | BatchMatMul | | MatMul, Gemm | MatMul, BatchMatMul, BatchMatMulV2 |
-| Maximum | Maximum | | Max | Maximum |
-| MaxPoolFusion | MaxPooling | Pooling | MaxPool, GlobalMaxPool | MaxPool |
-| Merge | | | | Merge |
-| Minimum | Minimum | | Min | Minimum |
-| MinimumGrad | | | | |
-| Mod | Mod | | Mod | Mod |
-| MulFusion | Mul | | Mul | Mul |
-| MulGrad | | | | |
-| Neg | Neg | | Neg | Neg |
-| NotEqual | NotEqual | | | NotEqual |
-| NonMaxSupppression | NonMaxSupppression | | NonMaxSupppression | NonMaxSupppression |
-| NonZero | NonZero | | NonZero | NonZero |
-| OneHot | OneHot | | OneHot | OneHot |
-| OnesLike | OnesLike | | | OnesLike |
-| PadFusion | Pad, MirrorPad, PadV2 | | Pad | MirrorPad, Pad, PadV2 |
-| PowFusion | Pow | Power | Pow[2] | Pow |
-| PReLUFusion | PRELU | PReLU | PRelu | |
-| RaggedRange | | | | RaggedRange |
-| RandomNormal | RandomNormal | | RandomNormal | RandomNormal |
-| RandomStandardNormal | | | | RandomStandardNormal |
-| Range | Range | | Range | Range |
-| Rank | Rank | | | Rank |
-| Reciprocal | | | Reciprocal | |
-| ReduceFusion | Sum, Mean, ReduceMax, ReduceMin, ReduceProd | Reduction | ReduceMean, ReduceMax, ReduceMin, ReduceProd, ReduceSum, ReduceSumSquare, ReduceL2,ReduceL1,ReduceLogSum | Sum, Max, Min, Mean, Prod, All |
-| Reshape | Reshape | Reshape | Reshape, Flatten | Reshape |
-| Resize | ResizeBilinear, NearestNeighbor | Interp | Resize, Upsample | ResizeBilinear, ResizeBicubic, ResizeNearestNeighbor |
-| ReverseV2 | reverse | | | ReverseV2 |
-| ReverseSequence | ReverseSequence | | ReverseSequence | ReverseSequence |
-| Round | Round | | Round | Round |
-| Rsqrt | Rsqrt | | | Rsqrt |
-| Select | | | | Select |
-| Selu | | | | Selu |
-| ScaleFusion | | Scale | | |
-| ScatterNd | ScatterNd | | ScatterND | |
-| ScatterNdUpdate | ScatterNdUpdate | | ScatterNdUpdate | |
-| SGD | SGD | SGD | | SGD |
-| Shape | Shape | | Shape | Shape |
-| Sin | Sin | | Sin | Sin |
-| Size | | | | Size |
-| SliceFusion | Slice | Slice | Slice | Slice |
-| SkipGram | SKipGram | | | |
-| Softmax | Softmax | Softmax | Softmax | Softmax |
-| Softplus | | | | Softplus |
-| SpaceToBatch | SpaceToBatch | | | |
-| SpaceToBatchND | SpaceToBatchND | | | SpaceToBatchND |
-| SpaceToDepth | SpaceToDepth | | SpaceToDepth | |
-| SparseToDense | SpareToDense | | | |
-| Splice | | | Splice | |
-| Split | Split, SplitV | | Split | Split, SplitV |
-| Sqrt | Sqrt | | Sqrt | Sqrt |
-| Square | Square | | | Square |
-| SquaredDifference | SquaredDifference | | | SquaredDifference |
-| Squeeze | Squeeze | | Squeeze | Squeeze |
-| StridedSlice | StridedSlice | | Slice, DynamicSlice | StridedSlice |
-| Stack | Stack | | | Pack |
-| SubFusion | Sub | | Sub | Sub |
-| Switch | | | | Switch |
-| TensorListFromTensor | | | | TensorListFromTensor |
-| TensorListGetItem | | | | TensorListGetItem |
-| TensorListReserve | | | | TensorListReserve |
-| TensorListSetItem | | | | TensorListSetItem |
-| TensorListStack | | | | TensorListStack |
-| TensorScatterAdd | TensorScatterAdd | | | TensorScatterAdd |
-| TileFusion | Tile | Tile | Tile | Tile |
-| TopKFusion | TopKV2 | | TopK | TopKV2 |
-| Transpose | Transpose | Permute | Transpose, Int8Transpose | Transpose |
-| Unique | Unique | | | |
-| UnsortedSegmentSum | | | | UnsortedSegmentSum |
-| Unsqueeze | | | Unsqueeze | |
-| Unstack | Unstack | | | |
-| Where | Where | | NonZero, Where | Where |
-| ZerosLike | ZerosLike | | | ZerosLike |
-| 转换工具支持的其他算子[4] | | | Constant, Atan, Asin, Tan, Loop, Dropout, If, Identity, Int8GivenIntTensorFill, Int8GivenTensorFill, Int8Quantize, Int8Dequantize, LpNormalization | Dropout, Enter, Exit, If, LinSpace, LoopCond, NextIteration, StatelessIf, StatelessWhile, TensorArrayGatherV3, TensorArrayReadV3, TensorArrayScatterV3, TensorArraySizeV3, TensorArrayV3, TensorArrayWriteV3, While |
-
-[1] Clip:仅支持将clip(0, 6)转换为Relu6。
-
-[2] Pow:仅支持指数为单个常数。
-
-[3] Sum与Max:仅支持输入个数为2。
-
-[4] [转换工具](https://www.mindspore.cn/lite/docs/zh-CN/master/converter/converter_tool.html)支持,但不需要具体实现的算子,一般这类算子在转化工具中被优化而消失,如被融合掉或者使用其他算子代替。
-
-[5] 当前支持使用环境变量export KEEP_ORIGIN_DTYPE=1来保持数据类型为int64,当使用int32数据类型存在溢出时可以考虑使用该选项,但是目前仅为实验性选项,后续将移除。
-
-[6] 目前MindSpore导出的MindIR中部分算子不支持,对应的MindSpore接口为ops.matmul、ops.dense、ops.max、ops.min。其中Max以及Min算子只在axis参数为None时不支持,其他场景支持。
+MindSpore Lite各算子在不同硬件后端上支持的数据类型如下表所示,表末尾附有选择后端与数据类型进行推理的简单示意:
+
+| 算子名称 | 算子功能 | CPU | NPU(麒麟) | GPU(Mali/Adreno) |
+| ----------------------------------- | ------------------------------------------------------------ | --------------------------------------------------- | --------- | ----------------------- |
+| Abs | 逐元素计算绝对值 | FP16<br/>FP32<br/>Int32<br/>Int8<br/>UInt8 | FP16 | FP16<br/>FP32 |
+| AbsGrad | 计算绝对值函数的梯度 | FP32 | - | - |
+| Activation | 激活函数 | FP16<br/>FP32<br/>Int32<br/>Int8<br/>UInt8 | FP16 | FP16<br/>FP32 |
+| ActivationGrad | 计算特定激活函数的梯度 | FP16<br/>FP32 | - | - |
+| Adam | 执行Adam优化器的一次参数更新步骤 | FP32 | - | - |
+| AddFusion | 逐元素计算加法 | FP16<br/>FP32<br/>Int32<br/>Int8<br/>UInt8<br/>Bool | FP16 | FP16<br/>FP32<br/>Int8 |
+| AdderFusion | 逐元素加法 | FP32 | - | - |
+| AddGrad | 计算加法操作的梯度 | FP32 | - | - |
+| AddN | 对N个相同形状和数据类型的输入张量进行逐元素相加 | FP16<br/>FP32 | - | - |
+| Affine | 对输入张量执行仿射变换 | FP32 | - | - |
+| All | 判断张量中所有元素在指定维度上是否都为True(非零) | FP32 | - | - |
+| AllGather | 分布式集合通信操作 | FP32 | - | - |
+| ApplyMomentum | 执行带动量的随机梯度下降的一次参数更新步骤 | FP32 | - | - |
+| Assert | 断言 | FP16<br/>FP32<br/>Bool | - | - |
+| Assign | 将一个值赋值给一个变量 | FP32 | - | - |
+| ArgmaxFusion | 求某一维度最大值 | FP16<br/>FP32<br/>Int8<br/>UInt8 | FP16 | FP16<br/>FP32 |
+| ArgminFusion | 求某一维度最小值 | FP16<br/>FP32<br/>Int8<br/>UInt8 | - | FP16<br/>FP32 |
+| AvgPoolFusion | 平均池化 | FP16<br/>FP32<br/>Int8<br/>UInt8 | FP16 | FP16<br/>FP32 |
+| AvgPoolGrad | 计算平均池化层的梯度 | FP16<br/>FP32 | - | - |
+| BatchNorm | 批量归一化 | FP16<br/>FP32<br/>Int8<br/>UInt8 | - | FP16<br/>FP32 |
+| BatchNormGrad | 计算批量归一化层的梯度 | FP16<br/>FP32 | - | - |
+| BatchToSpace | 空间到批次变换的逆操作 | FP32<br/>Int8<br/>UInt8 | - | FP16<br/>FP32 |
+| BatchToSpaceND | BatchToSpace的ND通用版本 | FP16<br/>FP32<br/>Int8<br/>UInt8 | - | FP16<br/>FP32 |
+| BiasAdd | 将偏置向量添加到输入张量 | FP16<br/>FP32<br/>Int8<br/>UInt8 | - | FP16<br/>FP32 |
+| BiasAddGrad | 计算BiasAdd操作的梯度 | FP16<br/>FP32 | - | - |
+| BinaryCrossEntropy | 计算二元交叉熵损失 | FP32 | - | - |
+| BinaryCrossEntropyGrad | 计算二元交叉熵损失函数的梯度 | FP32 | - | - |
+| BroadcastTo | 扩维 | FP16<br/>FP32<br/>Int32<br/>Bool | - | - |
+| Call | 调用一个子计算图或函数 | FP16<br/>FP32<br/>Int32<br/>Bool | - | - |
+| Cast | 数据类型转换 | FP16<br/>FP32<br/>Int32<br/>Int8<br/>UInt8<br/>Bool | FP16 | FP16<br/>FP32 |
+| Ceil | 向上取整 | FP16<br/>FP32<br/>Int8<br/>UInt8 | FP16 | FP16<br/>FP32 |
+| Clip | 限制元素范围 | FP32<br/>Int32 | - | - |
+| Concat | 拼接张量 | FP16<br/>FP32<br/>Int32<br/>Int8<br/>UInt8<br/>Bool | FP16 | FP16<br/>FP32<br/>Int32 |
+| ConstantOfShape | 生成一个与输入形状相同的张量,并用指定常量填充 | FP16<br/>FP32<br/>Int32 | - | - |
+| Conv2DFusion | 2D卷积 | FP16<br/>FP32<br/>Int8<br/>UInt8 | FP16 | FP16<br/>FP32 |
+| Conv2DBackpropFilterFusion | 计算普通卷积操作对卷积核的梯度 | FP16<br/>FP32 | - | - |
+| Conv2DBackpropInputFusion | 计算普通卷积操作对输入数据的梯度 | FP16<br/>FP32 | - | - |
+| Conv2dTransposeFusion | 执行转置卷积运算 | FP16<br/>FP32<br/>Int8<br/>UInt8 | FP16 | FP16<br/>FP32 |
+| Cos | 逐元素计算余弦 | FP16<br/>FP32<br/>Int8<br/>UInt8 | FP16 | FP16<br/>FP32 |
+| Crop | 从输入图像或特征图中裁剪出一个指定区域 | FP16<br/>FP32<br/>Int32<br/>Int8<br/>UInt8 | - | - |
+| CropAndResize | 从输入图像中根据一组边界框裁剪出区域,然后将每个区域缩放到统一大小 | FP32 | FP16 | - |
+| CumSum | 累计元素和 | FP32<br/>Int32 | - | - |
+| CustomExtractFeatures | 自定义特征提取算子 | FP32 | - | - |
+| CustomNormalize | 自定义归一化算子 | FP32 | - | - |
+| CustomPredict | 自定义预测算子 | FP32<br/>Int32 | - | - |
+| DEConv2DGradFilter | 计算转置卷积对卷积核的梯度 | FP32 | - | - |
+| DepthToSpace | 将深度数据重新排列到空间维度中 | FP16<br/>FP32<br/>Int8<br/>UInt8 | - | FP16<br/>FP32 |
+| DetectionPostProcess | 目标检测后处理 | FP32<br/>Int8<br/>UInt8 | - | - |
+| DivFusion | 逐元素除法 | FP16<br/>FP32<br/>Int32<br/>Int8<br/>UInt8 | FP16 | FP16<br/>FP32 |
+| DivGrad | 计算除法操作的梯度 | FP32 | - | - |
+| Dropout | 随机将输入张量的部分元素置0 | FP16<br/>FP32 | - | - |
+| DropoutGrad | 计算Dropout操作的梯度 | FP16<br/>FP32 | - | - |
+| DynamicQuant | 动态将浮点张量量化为uint8类型 | FP32 | - | - |
+| Eltwise | 元素级运算 | FP16<br/>FP32<br/>Int8<br/>UInt8 | FP16 | FP16<br/>FP32 |
+| Elu | 激活函数,对负输入使用指数修正 | FP16<br/>FP32 | - | - |
+| Equal | 判断输入是否相等 | FP16<br/>FP32<br/>Int32<br/>Int8<br/>UInt8 | FP16 | FP16<br/>FP32 |
+| EmbeddingLookupFusion | 优化版的词嵌入查找,将整数索引映射为密集向量 | FP32 | - | - |
+| Erf | 误差函数 | FP16<br/>FP32 | - | - |
+| ExpFusion | 逐元素取指数 | FP16<br/>FP32 | - | FP16<br/>FP32 |
+| ExpandDims | 在指定位置插入长度为1的维度 | FP16<br/>FP32<br/>Int32<br/>Int8<br/>UInt8<br/>Bool | FP16 | FP16<br/>FP32<br/>Int32 |
+| Fill | 生成一个填充指定常量的张量 | FP16<br/>FP32<br/>Int32<br/>Bool | - | FP16<br/>FP32 |
+| Flatten | 数据按维度展开 | FP16<br/>FP32<br/>Int32 | - | - |
+| FlattenGrad | 计算Flatten操作的梯度 | FP16<br/>FP32 | - | - |
+| Floor | 向下取整 | FP16<br/>FP32<br/>Int8<br/>UInt8 | FP16 | FP16<br/>FP32 |
+| FloorDiv | 逐元素向下取整除法 | FP16<br/>FP32<br/>Int32 | FP16 | FP16<br/>FP32 |
+| FloorMod | 逐元素取模运算,结果的符号与除数一致 | FP16<br/>FP32<br/>Int32 | FP16 | FP16<br/>FP32 |
+| FullConnection | 全连接层 | FP16<br/>FP32<br/>Int8<br/>UInt8 | FP16 | FP16<br/>FP32 |
+| FusedBatchNorm | 对输入做标准化 | FP16<br/>FP32<br/>Int8<br/>UInt8 | FP16 | - |
+| GatherNd | 根据索引张量从输入张量中收集指定位置的元素 | FP16<br/>FP32<br/>Int32<br/>Int8<br/>UInt8<br/>Bool | - | FP16<br/>FP32 |
+| Gather | 沿单一维度收集指定索引位置的元素 | FP16<br/>FP32<br/>Int32<br/>Int8<br/>UInt8<br/>Bool | FP16 | FP16<br/>FP32<br/>Int32 |
+| GatherD | 将输入tensor中的元素根据索引tensor进行收集 | FP16<br/>FP32<br/>Int32<br/>Bool | - | - |
+| GLU | 门控线性单元激活函数,将输入拆分为两部分并逐元素相乘 | FP32 | - | - |
+| Greater | 逐元素比较两个张量,返回A > B的逻辑结果(True/False) | FP16<br/>FP32<br/>Int32<br/>Int8<br/>UInt8 | FP16 | FP16<br/>FP32 |
+| GreaterEqual | 逐元素比较两个张量,返回A ≥ B的逻辑结果(True/False) | FP16<br/>FP32<br/>Int32<br/>Int8<br/>UInt8 | FP16 | FP16<br/>FP32 |
+| GroupNormFusion | 融合优化的组归一化 | FP32 | - | - |
+| GRU | 门控循环单元,简化版LSTM | FP16<br/>FP32 | - | - |
+| HashtableLookup | 哈希表查找 | FP32<br/>Int32 | - | - |
+| InstanceNorm | 实例归一化 | FP16<br/>FP32 | FP16 | - |
+| InvertPermutation | 反转置换索引 | FP16<br/>FP32<br/>Int32 | - | - |
+| IsFinite | 检测张量中每个元素是否为有限值(非inf/NaN) | FP32 | - | - |
+| L2NormalizeFusion | 融合优化的L2归一化 | FP32<br/>Int8<br/>UInt8 | - | - |
+| LayerNormFusion | 融合优化的层归一化 | FP16<br/>FP32<br/>Int8 | - | FP16<br/>FP32 |
+| LayerNormGrad | 计算层归一化的梯度 | FP16<br/>FP32 | - | - |
+| LeakyReLU | 带泄漏的ReLU激活函数,对负输入给予微小斜率 | FP16<br/>FP32<br/>Int8<br/>UInt8 | FP16 | FP16<br/>FP32 |
+| Less | 逐元素比较两个张量,返回A < B的逻辑结果(True/False) | FP16<br/>FP32<br/>Int32<br/>Int8<br/>UInt8 | FP16 | FP16<br/>FP32 |
+| LessEqual | 逐元素比较A ≤ B,返回布尔张量 | FP16<br/>FP32<br/>Int32<br/>Int8<br/>UInt8 | FP16 | FP16<br/>FP32 |
+| LRN | 局部响应归一化 | FP32 | - | - |
+| Log | 逐元素求对数 | FP16<br/>FP32<br/>Int8<br/>UInt8 | FP16 | FP16<br/>FP32 |
+| Log1p | 计算log(1+X) | FP32 | - | - |
+| LogGrad | 计算对数函数的梯度 | FP16<br/>FP32 | - | - |
+| LogicalAnd | 逐元素逻辑与运算 | FP16<br/>FP32<br/>Int32<br/>Bool | FP16 | FP16<br/>FP32 |
+| LogicalNot | 元素级逻辑非 | FP16<br/>FP32<br/>Int8<br/>UInt8<br/>Bool | FP16 | FP16<br/>FP32 |
+| LogicalOr | 逐元素逻辑或运算 | FP16<br/>FP32<br/>Bool | FP16 | FP16<br/>FP32 |
+| LogSoftmax | 对输入向量进行softmax操作,然后再对softmax结果取对数 | FP16<br/>FP32 | - | - |
+| LshProjection | 局部敏感哈希投影 | FP32 | - | - |
+| LSTM | 长短期记忆网络单元 | FP16<br/>FP32 | - | - |
+| LSTMGrad | 计算LSTM对隐状态的反向传播梯度 | FP32 | - | - |
+| LSTMGradData | 计算LSTM对输入数据的反向传播梯度 | FP32 | - | - |
+| LSTMGradWeight | 计算LSTM对权重的反向传播梯度 | FP32 | - | - |
+| MatMulFusion | 对2个输入做矩阵乘法运算 | FP16<br/>FP32<br/>Int8<br/>UInt8 | FP16 | FP16<br/>FP32 |
+| Maximum | 取元素级最大值 | FP16<br/>FP32<br/>Int32 | FP16 | FP16<br/>FP32 |
+| MaximumGrad | 计算最大值函数的梯度 | FP16<br/>FP32 | - | - |
+| MaxPoolFusion | 最大池化 | FP16<br/>FP32<br/>Int8<br/>UInt8 | FP16 | FP16<br/>FP32 |
+| MaxPoolGrad | 计算最大池化层的梯度 | FP16<br/>FP32 | - | - |
+| Merge | 控制流算子,将先可用的输入张量转发到输出 | FP16<br/>FP32 | - | - |
+| Minimum | 取元素级最小值 | FP16<br/>FP32<br/>Int32 | FP16 | FP16<br/>FP32 |
+| MinimumGrad | 计算最小值函数的梯度 | FP16<br/>FP32 | - | - |
+| Mod | 返回除法元素的余数 | FP32<br/>Int32 | - | - |
+| MulFusion | 逐元素乘法 | FP16<br/>FP32<br/>Int32<br/>Int8<br/>UInt8 | FP16 | FP16<br/>FP32 |
+| MulGrad | 计算乘法操作的梯度 | FP32 | - | - |
+| Neg | 逐元素求负数 | FP16<br/>FP32<br/>Int32 | FP16 | FP16<br/>FP32 |
+| NegGrad | 计算取负操作的梯度 | FP16<br/>FP32 | - | - |
+| NLLLoss | 计算负对数似然损失 | FP32 | - | - |
+| NLLLossGrad | 计算NLLLoss的梯度 | FP32 | - | - |
+| NotEqual | 逐元素比较两个张量,返回A != B的逻辑结果 | FP16<br/>FP32<br/>Int32<br/>Int8<br/>UInt8 | FP16 | FP16<br/>FP32 |
+| NonMaxSuppression | 非极大值抑制 | FP32 | - | - |
+| NonZero | 返回输入张量中所有非零元素的索引 | Bool | - | - |
+| OneHot | 将整数索引张量转换为独热编码表示 | FP16<br/>FP32<br/>Int32 | - | FP16<br/>FP32<br/>Int32 |
+| OnesLike | 创建一个与输入张量X形状完全相同但所有元素值均为1的新张量 | FP16<br/>FP32<br/>Int32 | - | - |
+| PadFusion | 将输入张量加上指定的padding,使其达到指定的大小 | FP16<br/>FP32<br/>Int8<br/>UInt8 | FP16 | FP16<br/>FP32 |
+| PartialFusion | 部分融合 | FP16<br/>FP32<br/>Int32<br/>Bool | - | - |
+| PowFusion | 逐元素求幂 | FP16<br/>FP32<br/>Int8<br/>UInt8 | - | FP16<br/>FP32 |
+| PowerGrad | 计算幂运算的梯度 | FP32 | - | - |
+| PriorBox | 生成先验框 | FP32<br/>Int8<br/>UInt8 | - | - |
+| PReLUFusion | PRelu激活函数 | FP16<br/>FP32 | - | FP16<br/>FP32 |
+| QuantDTypeCast | 执行量化数据类型转换 | FP16<br/>FP32<br/>Int8<br/>UInt8 | - | - |
+| RaggedRange | 生成非均匀间隔的序列 | FP16<br/>FP32<br/>Int32 | - | - |
+| RandomNormal | 生成一个张量,其中的值从正态分布中随机采样 | FP16<br/>FP32 | - | - |
+| RandomStandardNormal | 生成服从标准正态分布的随机数张量 | FP16<br/>FP32 | - | - |
+| Range | 生成某个区间内的元素 | FP16<br/>FP32<br/>Int32 | - | - |
+| Rank | 返回输入张量的维度数 | FP16<br/>FP32 | - | - |
+| RealDiv | 逐元素除法 | FP16<br/>FP32 | - | - |
+| Reciprocal | 返回倒数 | FP16<br/>FP32<br/>Int8 | FP16 | - |
+| ReduceFusion | 归约操作 | FP16<br/>FP32<br/>Int32<br/>Int8<br/>UInt8<br/>Bool | FP16 | FP16<br/>FP32 |
+| ReduceScatter | 分布式操作,将输入张量分段后分发到各设备,每设备仅保留一段结果 | FP32 | - | - |
+| Reshape | 改变张量形状,总元素个数不变 | FP16<br/>FP32<br/>Int32<br/>Int8<br/>UInt8<br/>Bool | FP16 | FP16<br/>FP32<br/>Int32 |
+| Resize | 对输入张量进行上采样或调整大小 | FP16<br/>FP32<br/>Int8<br/>UInt8 | FP16 | FP16<br/>FP32 |
+| ResizeGrad | 计算Resize的梯度 | FP16<br/>FP32 | - | - |
+| ReverseV2 | 沿指定轴反转张量 | FP32<br/>Int32 | - | - |
+| ReverseSequence | 对输入张量的可变长度序列进行部分反转 | FP32 | - | - |
+| ROIPooling | 区域兴趣池化 | FP32 | - | - |
+| Round | 四舍五入到最接近的整数数值 | FP16<br/>FP32<br/>Int8<br/>UInt8 | FP16 | FP16<br/>FP32 |
+| Rsqrt | 逐元素计算平方根倒数,用于归一化 | FP16<br/>FP32<br/>Int8<br/>UInt8 | FP16 | FP16<br/>FP32 |
+| RsqrtGrad | 计算平方根倒数的梯度 | FP32 | - | - |
+| Select | 根据条件从两个张量中选择元素 | FP32<br/>Bool | - | - |
+| Selu | 自归一化指数线性单元激活函数 | - | - | - |
+| ScaleFusion | 将缩放操作与相邻算子融合 | FP16<br/>FP32<br/>Int8<br/>UInt8 | FP16 | FP16<br/>FP32 |
+| ScatterNd | 根据索引将更新张量中的值散射到输出张量的指定位置 | FP16<br/>FP32<br/>Int32 | - | - |
+| ScatterNdUpdate | 使用给定值以及输入索引更新输入数据的值 | FP16<br/>FP32<br/>Int32 | - | - |
+| SGD | 随机梯度下降优化器 | FP32 | - | - |
+| Shape | 获得张量shape | FP16<br/>FP32<br/>Int32<br/>Int8<br/>UInt8<br/>Bool | - | FP16<br/>FP32 |
+| SigmoidCrossEntropyWithLogits | 结合Sigmoid激活和交叉熵损失 | FP32 | - | - |
+| SigmoidCrossEntropyWithLogitsGrad | 计算带Sigmoid的交叉熵损失的梯度 | FP32 | - | - |
+| Sin | 逐元素计算正弦 | FP16<br/>FP32<br/>Int8<br/>UInt8 | FP16 | FP16<br/>FP32 |
+| Size | 获取张量维度大小 | FP16<br/>FP32<br/>Int32 | - | - |
+| SliceFusion | 张量切片操作 | FP16<br/>FP32<br/>Int32<br/>Int8<br/>UInt8 | FP16 | FP16<br/>FP32 |
+| SkipGram | Skip-gram模型的核心操作,用于词向量训练 | FP32 | - | - |
+| SmoothL1Loss | 平滑L1损失 | FP32 | - | - |
+| SmoothL1LossGrad | 计算平滑L1损失的梯度 | FP32 | - | - |
+| Softmax | 归一化操作 | FP16<br/>FP32<br/>Int8<br/>UInt8 | FP16 | FP16<br/>FP32 |
+| SoftmaxGrad | 计算Softmax的梯度 | FP32 | - | - |
+| Softplus | 平滑的ReLU变体 | FP16<br/>FP32 | - | - |
+| SpaceToBatch | 将空间维度的数据块重排到批次维度 | FP16<br/>FP32<br/>Int8<br/>UInt8 | - | FP16<br/>FP32 |
+| SpaceToBatchND | 将空间维度的数据块拆分到批次维度 | FP16<br/>FP32<br/>Int8<br/>UInt8 | - | FP16<br/>FP32 |
+| SpaceToDepth | 将空间数据重组为深度通道 | FP16<br/>FP32 | - | FP16<br/>FP32 |
+| SparseToDense | 将稀疏表示转换为密集张量 | FP16<br/>FP32<br/>Int32 | - | FP16<br/>FP32<br/>Int32 |
+| SparseSoftmaxCrossEntropyWithLogits | 稀疏标签的Softmax交叉熵 | FP32 | - | - |
+| Splice | 沿指定轴连接输入张量的多个切片或范围 | FP16<br/>FP32 | - | - |
+| Split | 将输入张量沿指定轴分割成多个较小的输出张量 | FP16<br/>FP32<br/>Int32<br/>Int8<br/>UInt8 | FP16 | FP16<br/>FP32 |
+| SplitWithOverlap | 带重叠的分割张量 | FP16<br/>FP32 | - | - |
+| Sqrt | 逐元素开根号 | FP16<br/>FP32<br/>Int8<br/>UInt8 | FP16 | FP16<br/>FP32 |
+| SqrtGrad | 计算平方根的梯度 | FP32 | - | - |
+| Square | 逐元素平方 | FP16<br/>FP32<br/>Int8<br/>UInt8 | FP16 | FP16<br/>FP32 |
+| SquaredDifference | 逐元素计算 (A-B)² | FP16<br/>FP32 | - | FP16<br/>FP32 |
+| Squeeze | 移除维度为1的维度 | FP16<br/>FP32<br/>Int32<br/>Int8<br/>UInt8<br/>Bool | - | FP16<br/>FP32<br/>Int32 |
+| StridedSlice | Tensor切片 | FP16<br/>FP32<br/>Int32<br/>Int8<br/>UInt8 | FP16 | FP16<br/>FP32 |
+| StridedSliceGrad | 计算切片操作的梯度 | FP16<br/>FP32 | - | - |
+| Stack | 沿新轴堆叠多个张量 | FP16<br/>FP32<br/>Int32 | - | FP16<br/>FP32 |
+| SubFusion | 逐元素相减 | FP16<br/>FP32<br/>Int32<br/>Int8<br/>UInt8 | FP16 | FP16<br/>FP32 |
+| SubGrad | 计算减法的梯度 | FP32 | - | - |
+| Switch | 根据布尔条件选择输出分支 | FP16<br/>FP32<br/>Int32<br/>Bool | - | - |
+| SwitchLayer | 在模型中选择执行不同的子网络分支 | FP16<br/>FP32<br/>Int32<br/>Bool | - | - |
+| TensorListFromTensor | 将普通张量转换为张量列表,按指定轴分割 | FP16<br/>FP32<br/>Int32 | - | - |
+| TensorListGetItem | 从张量列表中获取指定索引位置的张量 | FP16<br/>FP32<br/>Int32 | - | - |
+| TensorListReserve | 预分配一个空张量列表,指定元素数据类型和初始容量 | FP16<br/>FP32<br/>Int32 | - | - |
+| TensorListSetItem | 将张量插入张量列表的指定位置 | FP16<br/>FP32<br/>Int32 | - | - |
+| TensorListStack | 将张量列表堆叠为一个普通张量 | FP16<br/>FP32<br/>Int32 | - | - |
+| TensorScatterAdd | 根据索引将更新张量的值分散添加到目标张量的指定位置 | FP32<br/>Int32 | - | - |
+| TileFusion | 平铺给定矩阵 | FP16<br/>FP32<br/>Int32<br/>Bool | FP16 | - |
+| TopKFusion | 从输入张量中返回topK个元素 | FP16<br/>FP32<br/>Int32<br/>Int8<br/>UInt8 | - | - |
+| Transpose | Tensor转置 | FP16<br/>FP32<br/>Int32<br/>Int8<br/>Bool | FP16 | FP16<br/>FP32 |
+| UniformReal | 生成服从均匀分布的随机数张量 | FP32<br/>Int32 | - | - |
+| Unique | 返回输入张量中的唯一值,并可返回值的索引和计数 | FP16<br/>FP32<br/>Int32 | - | - |
+| UnsortedSegmentSum | 对张量进行分段求和,不要求分段索引有序 | FP16<br/>FP32<br/>Int32 | - | - |
+| Unsqueeze | 将输入张量添加一个新的维度 | FP16<br/>FP32<br/>Int32<br/>Int8<br/>UInt8<br/>Bool | FP16 | FP16<br/>FP32<br/>Int32 |
+| Unstack | 沿指定轴拆分张量为多个子张量 | FP16<br/>FP32<br/>Int32 | - | - |
+| Where | 元素选择 | FP16<br/>FP32<br/>Int32<br/>Bool | - | - |
+| ZerosLike | 生成与输入张量形状相同但全为0的新张量 | FP16<br/>FP32<br/>Int32 | - | - |
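+
+上表中的后端与数据类型可以在推理时通过上下文(Context)进行选择。下面给出一个基于mindspore_lite Python接口选择CPU后端并启用FP16推理的简单示意,其中的属性名与模型文件名model.ms均为示例性假设,具体接口以实际安装版本的API文档为准:
+
+```python
+import mindspore_lite as mslite
+
+# 构造上下文并指定目标后端,可选后端取决于实际发行包(例如cpu/gpu/ascend)
+context = mslite.Context()
+context.target = ["cpu"]
+context.cpu.enable_fp16 = True  # 启用CPU FP16推理,对应上表CPU列中的FP16(属性名为假设)
+
+# 加载并构建模型,model.ms仅为占位文件名
+model = mslite.Model()
+model.build_from_file("model.ms", mslite.ModelType.MINDIR_LITE, context)
+```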
diff --git a/docs/lite/docs/source_zh_cn/reference/operator_list_lite_for_caffe.md b/docs/lite/docs/source_zh_cn/reference/operator_list_lite_for_caffe.md
new file mode 100644
index 0000000000000000000000000000000000000000..761eb5f76d4ccd10357da1f4cbe57e403f3ad150
--- /dev/null
+++ b/docs/lite/docs/source_zh_cn/reference/operator_list_lite_for_caffe.md
@@ -0,0 +1,31 @@
+# MindSpore Lite支持的Caffe算子列表
+
+[![查看源文件](https://mindspore-website.obs.cn-north-4.myhuaweicloud.com/website-images/master/resource/_static/logo_source.svg)](https://gitee.com/mindspore/docs/blob/master/docs/lite/docs/source_zh_cn/reference/operator_list_lite_for_caffe.md)
+
+| MindSpore Lite算子名称 | 对应的Caffe算子 |
+| ---------------------- | -------------------------------- |
+| Activation | ReLU, ReLU6, Sigmoid, TanH, Elu |
+| ArgmaxFusion | ArgMax |
+| AvgPoolFusion | Pooling |
+| BatchNorm | BatchNorm |
+| Concat | Concat |
+| Conv2DFusion | Convolution |
+| Conv2dTransposeFusion | Deconvolution |
+| Crop | Crop |
+| Eltwise | Eltwise |
+| Elu | ELU |
+| ExpFusion | Exp |
+| Flatten | Flatten |
+| FullConnection | InnerProduct |
+| MaxPoolFusion | Pooling |
+| PowFusion | Power |
+| PReLUFusion | PReLU |
+| ReduceFusion | Reduction |
+| Reshape | Reshape |
+| Resize | Interp |
+| ScaleFusion | Scale |
+| SGD | SGD |
+| SliceFusion | Slice |
+| Softmax | Softmax |
+| TileFusion | Tile |
+| Transpose | Permute |
diff --git a/docs/lite/docs/source_zh_cn/reference/operator_list_lite_for_onnx.md b/docs/lite/docs/source_zh_cn/reference/operator_list_lite_for_onnx.md
new file mode 100644
index 0000000000000000000000000000000000000000..2ec2250b7a381fd5dca30fc765984bebbd800d0e
--- /dev/null
+++ b/docs/lite/docs/source_zh_cn/reference/operator_list_lite_for_onnx.md
@@ -0,0 +1,100 @@
+# MindSpore Lite支持的ONNX算子列表
+
+[![查看源文件](https://mindspore-website.obs.cn-north-4.myhuaweicloud.com/website-images/master/resource/_static/logo_source.svg)](https://gitee.com/mindspore/docs/blob/master/docs/lite/docs/source_zh_cn/reference/operator_list_lite_for_onnx.md)
+
+> - 以下所有算子,均不支持int64类型输入。
+> - 当前支持使用环境变量 `export KEEP_ORIGIN_DTYPE=1` 来保持数据类型为int64,当使用int32数据类型存在溢出时可以考虑使用该选项,但该选项目前仅为实验性选项,后续将移除,设置方式可参考下方示例。
+
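+下面是一个设置该环境变量并调用转换工具将ONNX模型转换为MindSpore Lite模型的最小示意,假设converter_lite可执行文件已加入PATH,模型文件名model.onnx为占位名称,实际参数以转换工具文档为准:
+
+```python
+import os
+import subprocess
+
+# 复制当前环境变量,并开启实验性选项KEEP_ORIGIN_DTYPE,使转换过程保持int64数据类型
+env = os.environ.copy()
+env["KEEP_ORIGIN_DTYPE"] = "1"
+
+# 调用converter_lite,将ONNX模型转换为MindSpore Lite模型(输出model.ms)
+subprocess.run(
+    ["converter_lite", "--fmk=ONNX", "--modelFile=model.onnx", "--outputFile=model"],
+    env=env,
+    check=True,
+)
+```
+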
+| MindSpore Lite算子名称 | 算子功能 | 对应ONNX算子名称 | 算子规格 |
+| ---------------------- | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ |
+| Abs | 逐元素计算绝对值 | Abs | 不支持uint8类型。不支持输入张量量化参数为空。 |
+| Activation | 激活函数 | Relu, LeakyRelu, PRelu, Elu, Tanh, Sigmoid, HardSigmoid, Softplus, Gelu | - |
+| AddFusion | 逐元素计算加法 | Add, Int8Add | - |
+| AdderFusion | 逐元素加法 | adder_f | - |
+| ArgmaxFusion | 求某一维度最大值 | ArgMax | 不支持uint8类型。不支持输入张量量化参数为空。 |
+| ArgminFusion | 求某一维度最小值 | ArgMin | - |
+| AvgPoolFusion | 平均池化 | AveragePool, GlobalAveragePool, Int8AveragePool | - |
+| BatchNorm | 批量归一化 | BatchNormalization | - |
+| BiasAdd | 将偏置向量(bias)添加到输入张量 | BiasAdd | - |
+| BroadcastTo | 扩维 | Expand | - |
+| Cast | 数据类型转换 | Cast | 不支持以下数值类型转换:fp32转int8、fp32转uint32、int32转int8、int32转uint32、int32转uint8、int8转bool、int8转uint8。 |
+| Ceil | 向上取整 | Ceil | - |
+| Clip | 限制元素范围 | Clip | 仅支持将clip(0, 6)转换为Relu6。 |
+| Concat | 拼接张量 | Concat | - |
+| ConstantOfShape | 生成一个与输入形状相同的张量,并用指定常量填充 | ConstantOfShape | - |
+| Conv2DFusion | 2D卷积 | Conv, Int8Conv, ConvRelu, Int8ConvRelu | - |
+| Conv2dTransposeFusion | 执行转置卷积运算 | ConvTranspose | - |
+| Cos | 逐元素计算余弦 | Cos | - |
+| CumSum | 累计元素和 | CumSum | - |
+| DepthToSpace | 将深度数据重新排列到空间维度中 | DepthToSpace | 不支持uint8类型。不支持未知维度输入。 |
+| DivFusion | 逐元素除法 | Div | 不支持除数为0。 |
+| Dropout | 随机将输入张量的部分元素置 0 | Dropout | - |
+| DynamicQuant | 动态将浮点张量量化为 uint8类型 | DynamicQuantizeLinear | - |
+| Eltwise | 元素级运算 | Sum, Max | 仅支持输入个数为2。 |
+| Elu | 激活函数,对负输入使用指数修正 | Elu, NonMaxSuppression | - |
+| Equal | 判断输入是否相等 | Equal | 不支持uint8输入;int8输入不支持bool输出。 |
+| Erf | 误差函数 | Erf | - |
+| ExpFusion | 逐元素取指数 | Exp | - |
+| Flatten | 数据按维度展开 | Flatten | 不支持uint8类型。 |
+| Floor | 向下取整 | Floor | - |
+| FusedBatchNorm | 对输入做标准化 | BatchNormalization | - |
+| GatherNd | 根据索引张量从输入张量中收集指定位置的元素 | GatherND | - |
+| Gather | 沿单一维度收集指定索引位置的元素 | Gather | 不支持uint8类型。不支持QuantType_QUANT_NONE量化类型。 |
+| GatherD | 将输入tensor中的元素根据索引tensor进行收集 | GatherElements | - |
+| Greater | 逐元素比较两个张量,返回 A > B的逻辑结果(True/False) | Greater | - |
+| GreaterEqual | 逐元素比较两个张量,返回 A ≥ B的逻辑结果 | GreaterOrEqual | - |
+| InstanceNorm | 实例归一化 | InstanceNormalization | - |
+| LeakyReLU | 带泄漏的 ReLU激活函数,对负输入给予微小斜率 | LeakyRelu | - |
+| Less | 逐元素比较两个张量,返回 A < B的逻辑结果。 | Less | - |
+| LRN | 局部响应归一化 | Lrn, LRN | - |
+| Log | 逐元素求对数 | Log | 不支持负数输入。 |
+| LogicalAnd | 逐元素逻辑与(AND)运算 | And | - |
+| LogicalNot | 元素级逻辑非 | Not | - |
+| LogicalOr | 逐元素逻辑或(OR)运算 | Or | - |
+| LogSoftmax | 对输入向量进行softmax操作,然后再对softmax结果取对数 | LogSoftmax | 不支持inf输入。 |
+| LSTM | 长短期记忆网络单元 | LSTM | - |
+| MatMulFusion | 对2个输入做矩阵乘法运算;使用输入张量、一组学习的权重计算内积,并添加偏差 | MatMul, Gemm | - |
+| Maximum | 取元素级最大值 | Max | - |
+| MaxPoolFusion | 最大池化 | MaxPool, GlobalMaxPool | - |
+| Minimum | 取元素级最小值 | Min | - |
+| Mod | 返回除法元素的余数 | Mod | - |
+| MulFusion | 逐元素乘法 | Mul | - |
+| Neg | 逐元素求负数 | Neg | - |
+| NonMaxSuppression | 非极大值抑制 | NonMaxSuppression | - |
+| NonZero | 返回输入张量中所有非零元素的索引 | NonZero | - |
+| OneHot | 将整数索引张量转换为独热编码(One-Hot)表示 | OneHot | - |
+| PadFusion | 将输入张量加上指定的 padding,使其达到指定的大小 | Pad | 不支持int32类型。 |
+| PowFusion | 逐元素求幂 | Pow | 仅支持指数为单个常数。 |
+| PReLUFusion | PRelu激活函数 | PRelu | - |
+| RandomNormal | 生成一个张量,其中的值从正态分布(高斯分布) 中随机采样 | RandomNormal | - |
+| Range | 生成某个区间内的元素 | Range | - |
+| Reciprocal | 返回倒数 | Reciprocal | - |
+| ReduceFusion | 归约操作 | ReduceMean, ReduceMax, ReduceMin, ReduceProd, ReduceSum, ReduceSumSquare, ReduceL2, ReduceL1, ReduceLogSum | - |
+| Reshape | 改变张量形状,总元素个数不变 | Reshape, Flatten | - |
+| Resize | 对输入张量进行上采样或调整大小 | Resize, Upsample | - |
+| ReverseSequence | 对输入张量的可变长度序列进行部分反转 | ReverseSequence | - |
+| Round | 四舍五入到最接近的整数数值 | Round | - |
+| ScatterNd | 根据索引将更新张量中的值散射到输出张量的指定位置 | ScatterND | - |
+| ScatterNdUpdate | 使用给定值以及输入索引更新输入数据的值 | ScatterNdUpdate | - |
+| Shape | 获得张量shape | Shape | - |
+| Sin | 逐元素计算正弦 | Sin | - |
+| Size | 获取张量维度大小 | Size | - |
+| SliceFusion | 张量切片操作 | Slice | - |
+| Softmax | 归一化操作 | Softmax | - |
+| SpaceToDepth | 高度和宽度维度的值移至深度维度 | SpaceToDepth | - |
+| Splice | 沿指定轴连接输入张量的多个切片或范围。 | Splice | - |
+| Split | 将输入张量沿指定轴分割成多个较小的输出张量。 | Split | - |
+| Sqrt | 逐元素开根号 | Sqrt | - |
+| Squeeze | 移除维度为1的维度 | Squeeze | - |
+| StridedSlice | Tensor切片 | Slice, DynamicSlice | - |
+| SubFusion | 逐元素相减 | Sub | - |
+| TileFusion | 平铺给定矩阵 | Tile | 不支持int8类型。 |
+| TopKFusion | 从输入张量中返回top K个元素 | TopK | - |
+| Transpose | Tensor转置 | Transpose, Int8Transpose | - |
+| Tril | 下三角矩阵 | Trilu(属性upper=0) | - |
+| Triu | 上三角矩阵 | Trilu(属性upper=1) | - |
+| Unsqueeze | 将输入张量添加一个新的维度 | Unsqueeze | - |
+| Where | 元素选择 | NonZero, Where | - |
+| 转换工具支持的其他算子 | - | Constant, Atan, Asin, Tan, Loop, Dropout, If, Identity, Int8GivenIntTensorFill, Int8GivenTensorFill, Int8Quantize, Int8Dequantize, LpNormalization | 转换工具支持,但不需要具体实现的算子,一般这类算子在转换工具中被优化而消失,如被融合掉或者使用其他算子代替。 |
diff --git a/docs/lite/docs/source_zh_cn/reference/operator_list_lite_for_tensorflow.md b/docs/lite/docs/source_zh_cn/reference/operator_list_lite_for_tensorflow.md
new file mode 100644
index 0000000000000000000000000000000000000000..4e3de3aea5624351f019e439eb0c51a3b69491b7
--- /dev/null
+++ b/docs/lite/docs/source_zh_cn/reference/operator_list_lite_for_tensorflow.md
@@ -0,0 +1,119 @@
+# MindSpore Lite支持的TensorFlow算子列表
+
+[![查看源文件](https://mindspore-website.obs.cn-north-4.myhuaweicloud.com/website-images/master/resource/_static/logo_source.svg)](https://gitee.com/mindspore/docs/blob/master/docs/lite/docs/source_zh_cn/reference/operator_list_lite_for_tensorflow.md)
+
+| MindSpore Lite算子名称 | 对应的TensorFlow算子 |
+| ---------------------- | ------------------------------------------------------------ |
+| Abs | Abs |
+| Activation | Activation, Elu, Relu, Relu6, Sigmoid, Tanh, Selu, LeakyRelu, Softplus |
+| Adam | Adam |
+| AddFusion | Add, AddV2 |
+| All | All |
+| ApplyMomentum | ApplyMomentum |
+| Assert | Assert |
+| Assign | Assign |
+| ArgmaxFusion | ArgMax |
+| ArgminFusion | ArgMin |
+| AvgPoolFusion | AvgPool |
+| BatchToSpace | BatchToSpace |
+| BatchToSpaceND | BatchToSpaceND |
+| BiasAdd | BiasAdd |
+| BinaryCrossEntropy | BinaryCrossEntropy |
+| BroadcastTo | BroadcastTo |
+| Cast | Cast |
+| Ceil | Ceil |
+| Clip | Clip |
+| Concat | ConcatV2 |
+| Conv2DFusion | Conv2D |
+| Conv2dTransposeFusion | Conv2DBackpropInput |
+| Cos | Cos |
+| CropAndResize | CropAndResize |
+| CumSum | Cumsum |
+| DepthToSpace | DepthToSpace |
+| DivFusion | Div, RealDiv |
+| Dropout | Dropout |
+| Elu | NonMaxSuppressionV3 |
+| Equal | Equal |
+| Erf | Erf |
+| ExpFusion | Exp |
+| ExpandDims | ExpandDims |
+| Fill | Fill |
+| Floor | Floor |
+| FloorDiv | FloorDiv |
+| FloorMod | FloorMod |
+| FusedBatchNorm | FusedBatchNorm, FusedBatchNormV3 |
+| GatherNd | GatherNd |
+| Gather | GatherV2 |
+| Greater | Greater |
+| GreaterEqual | GreaterEqual |
+| InvertPermutation | InvertPermutation |
+| IsFinite | IsFinite |
+| LeakyReLU | LeakyRelu |
+| Less | Less |
+| LessEqual | LessEqual |
+| Log | Log |
+| Log1p | Log1p |
+| LogicalAnd | LogicalAnd |
+| LogicalNot | LogicalNot |
+| LogicalOr | LogicalOr |
+| MatMulFusion | MatMul, BatchMatMul, BatchMatMulV2 |
+| Maximum | Maximum |
+| MaxPoolFusion | MaxPool |
+| Merge | Merge |
+| Minimum | Minimum |
+| Mod | Mod |
+| MulFusion | Mul |
+| Neg | Neg |
+| NotEqual | NotEqual |
+| NonMaxSuppression | NonMaxSuppression |
+| NonZero | NonZero |
+| OneHot | OneHot |
+| OnesLike | OnesLike |
+| PadFusion | MirrorPad, Pad, PadV2 |
+| PowFusion | Pow |
+| RaggedRange | RaggedRange |
+| RandomNormal | RandomNormal |
+| RandomStandardNormal | RandomStandardNormal |
+| Range | Range |
+| Rank | Rank |
+| ReduceFusion | Sum, Max, Min, Mean, Prod, All |
+| Reshape | Reshape |
+| Resize | ResizeBilinear, ResizeBicubic, ResizeNearestNeighbor |
+| ReverseV2 | ReverseV2 |
+| ReverseSequence | ReverseSequence |
+| Round | Round |
+| Rsqrt | Rsqrt |
+| Select | Select |
+| Selu | Selu |
+| SGD | SGD |
+| Shape | Shape |
+| Sin | Sin |
+| Size | Size |
+| SliceFusion | Slice |
+| Softmax | Softmax |
+| Softplus | Softplus |
+| SpaceToBatchND | SpaceToBatchND |
+| Split | Split, SplitV |
+| Sqrt | Sqrt |
+| Square | Square |
+| SquaredDifference | SquaredDifference |
+| Squeeze | Squeeze |
+| StridedSlice | StridedSlice |
+| Stack | Pack |
+| SubFusion | Sub |
+| Switch | Switch |
+| TensorListFromTensor | TensorListFromTensor |
+| TensorListGetItem | TensorListGetItem |
+| TensorListReserve | TensorListReserve |
+| TensorListSetItem | TensorListSetItem |
+| TensorListStack | TensorListStack |
+| TensorScatterAdd | TensorScatterAdd |
+| TileFusion | Tile |
+| TopKFusion | TopKV2 |
+| Transpose | Transpose |
+| UnsortedSegmentSum | UnsortedSegmentSum |
+| Where | Where |
+| ZerosLike | ZerosLike |
+| 转换工具支持的其他算子 | Dropout, Enter, Exit, If, LinSpace, LoopCond, NextIteration, StatelessIf, StatelessWhile, TensorArrayGatherV3, TensorArrayReadV3, TensorArrayScatterV3, TensorArraySizeV3, TensorArrayV3, TensorArrayWriteV3, While |
+
+> - 表中“转换工具支持的其他算子”一行列出的算子由[转换工具](https://www.mindspore.cn/lite/docs/zh-CN/master/converter/converter_tool.html)支持,但不需要具体实现,一般这类算子在转换工具中被优化而消失,如被融合掉或者使用其他算子代替。
diff --git a/docs/lite/docs/source_zh_cn/reference/operator_list_lite_for_tflite.md b/docs/lite/docs/source_zh_cn/reference/operator_list_lite_for_tflite.md
new file mode 100644
index 0000000000000000000000000000000000000000..a5a2b429b2e25b394a2b9fd0b5f3365b81ecd562
--- /dev/null
+++ b/docs/lite/docs/source_zh_cn/reference/operator_list_lite_for_tflite.md
@@ -0,0 +1,116 @@
+# MindSpore Lite支持的TensorFlow Lite算子列表
+
+[![查看源文件](https://mindspore-website.obs.cn-north-4.myhuaweicloud.com/website-images/master/resource/_static/logo_source.svg)](https://gitee.com/mindspore/docs/blob/master/docs/lite/docs/source_zh_cn/reference/operator_list_lite_for_tflite.md)
+
+| MindSpore Lite算子名称 | 对应的TensorFlow Lite算子 |
+| ---------------------- | ------------------------------------------------------------ |
+| Abs | Abs |
+| Activation | Activation, ReLU, ReLU6, PReLU, LeakyReLU, Tanh, HardSwish, Logistic |
+| Adam | Adam |
+| AddFusion | Add |
+| AddN | AddN |
+| All | All |
+| ApplyMomentum | ApplyMomentum |
+| Assign | Assign |
+| ArgmaxFusion | Argmax |
+| ArgminFusion | Argmin |
+| AvgPoolFusion | MeanPooling |
+| BatchToSpace | BatchToSpace |
+| BatchToSpaceND | BatchToSpaceND |
+| BinaryCrossEntropy | BinaryCrossEntropy |
+| BroadcastTo | BroadcastTo |
+| Cast | Cast, QUANTIZE, DEQUANTIZE |
+| Ceil | Ceil |
+| Clip | Clip |
+| Concat | Concat |
+| Conv2DFusion | Conv2D |
+| Conv2dTransposeFusion | DeConv2D |
+| Cos | Cos |
+| CustomExtractFeatures | ExtractFeatures |
+| CustomNormalize | Normalize |
+| CustomPredict | Predict |
+| DepthToSpace | DepthToSpace |
+| DetectionPostProcess | Custom |
+| DivFusion | Div, RealDiv |
+| Dropout | Dropout |
+| Equal | Equal |
+| Erf | Erf |
+| ExpFusion | Exp |
+| ExpandDims | ExpandDims |
+| Fill | Fill |
+| Floor | Floor |
+| FloorDiv | FloorDiv |
+| FloorMod | FloorMod |
+| FullConnection | FullyConnected |
+| FusedBatchNorm | FusedBatchNorm |
+| GatherNd | GatherND |
+| Gather | Gather |
+| Greater | Greater |
+| GreaterEqual | GreaterEqual |
+| HashtableLookup | HashtableLookup |
+| InstanceNorm | InstanceNorm |
+| IsFinite | IsFinite |
+| LeakyReLU | LeakyRelu |
+| Less | Less |
+| LessEqual | LessEqual |
+| LRN | LocalResponseNorm |
+| Log | Log |
+| Log1p | Log1p |
+| LogicalAnd | LogicalAnd |
+| LogicalNot | LogicalNot |
+| LogicalOr | LogicalOr |
+| LogSoftmax | LogSoftmax |
+| LshProjection | LshProjection |
+| MatMulFusion | BatchMatMul |
+| Maximum | Maximum |
+| MaxPoolFusion | MaxPooling |
+| Minimum | Minimum |
+| Mod | Mod |
+| MulFusion | Mul |
+| Neg | Neg |
+| NotEqual | NotEqual |
+| NonMaxSuppression | NonMaxSuppression |
+| NonZero | NonZero |
+| OneHot | OneHot |
+| OnesLike | OnesLike |
+| PadFusion | Pad, MirrorPad, PadV2 |
+| PowFusion | Pow |
+| PReLUFusion | PRELU |
+| RandomNormal | RandomNormal |
+| Range | Range |
+| Rank | Rank |
+| ReduceFusion | Sum, Mean, ReduceMax, ReduceMin, ReduceProd |
+| Reshape | Reshape |
+| Resize | ResizeBilinear, NearestNeighbor |
+| ReverseV2 | reverse |
+| ReverseSequence | ReverseSequence |
+| Round | Round |
+| Rsqrt | Rsqrt |
+| ScatterNd | ScatterNd |
+| ScatterNdUpdate | ScatterNdUpdate |
+| SGD | SGD |
+| Shape | Shape |
+| Sin | Sin |
+| SliceFusion | Slice |
+| SkipGram | SkipGram |
+| Softmax | Softmax |
+| SpaceToBatch | SpaceToBatch |
+| SpaceToBatchND | SpaceToBatchND |
+| SpaceToDepth | SpaceToDepth |
+| SparseToDense | SparseToDense |
+| Split | Split, SplitV |
+| Sqrt | Sqrt |
+| Square | Square |
+| SquaredDifference | SquaredDifference |
+| Squeeze | Squeeze |
+| StridedSlice | StridedSlice |
+| Stack | Stack |
+| SubFusion | Sub |
+| TensorScatterAdd | TensorScatterAdd |
+| TileFusion | Tile |
+| TopKFusion | TopKV2 |
+| Transpose | Transpose |
+| Unique | Unique |
+| Unstack | Unstack |
+| Where | Where |
+| ZerosLike | ZerosLike |
diff --git a/docs/lite/docs/source_zh_cn/reference/operator_lite.rst b/docs/lite/docs/source_zh_cn/reference/operator_lite.rst
new file mode 100644
index 0000000000000000000000000000000000000000..ca4fcd1c465b5c7ef0bfb9b4e6dc55371a998c4b
--- /dev/null
+++ b/docs/lite/docs/source_zh_cn/reference/operator_lite.rst
@@ -0,0 +1,15 @@
+Lite算子支持
+===================
+
+.. image:: https://mindspore-website.obs.cn-north-4.myhuaweicloud.com/website-images/master/resource/_static/logo_source.svg
+ :target: https://gitee.com/mindspore/docs/blob/master/docs/lite/docs/source_zh_cn/reference/operator_lite.rst
+ :alt: 查看源文件
+
+.. toctree::
+ :maxdepth: 1
+
+ operator_list_lite
+ operator_list_lite_for_onnx
+ operator_list_lite_for_tflite
+ operator_list_lite_for_tensorflow
+ operator_list_lite_for_caffe
\ No newline at end of file