operator name (string, 180 classes) | used in model (string, 155 classes) | args (string, length 19–5.24k)
---|---|---
aten._softmax.default
|
TIMM/crossvit_9_240
|
((T([64, 4, 1, 197], f16), -1, False), {})
|
aten._softmax.default
|
TIMM/crossvit_9_240
|
((T([64, 4, 1, 401], f16), -1, False), {})
|
aten._softmax.default
|
TIMM/jx_nest_base
|
((T([64, 4, 16, 196, 196], f16), -1, False), {})
|
aten._softmax.default
|
TIMM/crossvit_9_240
|
((T([64, 4, 197, 197], f16), -1, False), {})
|
aten._softmax.default
|
TIMM/crossvit_9_240
|
((T([64, 4, 401, 401], f16), -1, False), {})
|
aten._softmax.default
|
HuggingFace/ElectraForQuestionAnswering
|
((T([64, 4, 512, 512], f16), -1, False), {})
|
aten._softmax.default
|
TIMM/pit_b_224
|
((T([64, 4, 962, 962], f16), -1, False), {})
|
aten._softmax.default
|
TIMM/volo_d1_224
|
((T([64, 6, 196, 9, 9], f16, stride=(95256, 81, 486, 9, 1)), -1, False), {})
|
aten._softmax.default
|
TIMM/tnt_s_patch16_224
|
((T([64, 6, 197, 197], f16), -1, False), {})
|
aten._softmax.default
|
TIMM/pit_b_224
|
((T([64, 8, 257, 257], f16), -1, False), {})
|
aten._softmax.default
|
TIMM/jx_nest_base
|
((T([64, 8, 4, 196, 196], f16), -1, False), {})
|
aten._softmax.default
|
TorchBench/hf_Albert
|
((T([8, 12, 512, 512], f16), -1, False), {})
|
aten._softmax.default
|
TorchBench/hf_DistilBert
|
((T([8, 12, 512, 512], f16), -1, False), {})
|
aten._softmax.default
|
HuggingFace/MegatronBertForQuestionAnswering
|
((T([8, 16, 128, 128], f16), -1, False), {})
|
aten._softmax.default
|
TorchBench/timm_vision_transformer
|
((T([8, 6, 197, 197], f16), -1, False), {})
|
aten._softmax.default
|
TorchBench/speech_transformer
|
((T([80, 204, 204], f16), 2, False), {})
|
aten._softmax.default
|
TorchBench/speech_transformer
|
((T([80, 22, 204], f16), 2, False), {})
|
aten._softmax.default
|
TorchBench/speech_transformer
|
((T([80, 22, 22], f16), 2, False), {})
|
aten._softmax.default
|
HuggingFace/PLBartForConditionalGeneration
|
((T([96, 128, 128], f16), -1, False), {})
|
aten._softmax_backward_data.default
|
TorchBench/vision_maskrcnn
|
((T([0, 91], f16), T([0, 91], f16), -1, f16), {})
|
aten._softmax_backward_data.default
|
HuggingFace/AllenaiLongformerBase
|
((T([1, 1024, 12, 513], f32), T([1, 1024, 12, 513], f32), -1, f16), {})
|
aten._softmax_backward_data.default
|
HuggingFace/BigBird
|
((T([1, 12, 12, 64, 512], f16), T([1, 12, 12, 64, 512], f16), -1, f16), {})
|
aten._softmax_backward_data.default
|
HuggingFace/CamemBert
|
((T([1, 12, 512, 512], f16), T([1, 12, 512, 512], f16), -1, f16), {})
|
aten._softmax_backward_data.default
|
HuggingFace/DistillGPT2
|
((T([1, 12, 512, 512], f16), T([1, 12, 512, 512], f16), -1, f16), {})
|
aten._softmax_backward_data.default
|
HuggingFace/BigBird
|
((T([1, 12, 64, 1024], f16), T([1, 12, 64, 1024], f16), -1, f16), {})
|
aten._softmax_backward_data.default
|
HuggingFace/BigBird
|
((T([1, 12, 64, 448], f16), T([1, 12, 64, 448], f16), -1, f16), {})
|
aten._softmax_backward_data.default
|
HuggingFace/GPTNeoForCausalLM
|
((T([1, 16, 128, 128], f32), T([1, 16, 128, 128], f32), -1, f32), {})
|
aten._softmax_backward_data.default
|
HuggingFace/GPTNeoForSequenceClassification
|
((T([1, 16, 128, 128], f32), T([1, 16, 128, 128], f32), -1, f32), {})
|
aten._softmax_backward_data.default
|
HuggingFace/DebertaV2ForMaskedLM
|
((T([1, 24, 512, 512], f16), T([1, 24, 512, 512], f16), -1, f16), {})
|
aten._softmax_backward_data.default
|
HuggingFace/DebertaV2ForQuestionAnswering
|
((T([1, 24, 512, 512], f16), T([1, 24, 512, 512], f16), -1, f16), {})
|
aten._softmax_backward_data.default
|
HuggingFace/ElectraForCausalLM
|
((T([1, 4, 512, 512], f16), T([1, 4, 512, 512], f16), -1, f16), {})
|
aten._softmax_backward_data.default
|
HuggingFace/YituTechConvBert
|
((T([1, 6, 512, 512], f16), T([1, 6, 512, 512], f16), -1, f16), {})
|
aten._softmax_backward_data.default
|
TIMM/eca_halonext26ts
|
((T([1024, 1, 64, 144], f16), T([1024, 1, 64, 144], f16), -1, f16), {})
|
aten._softmax_backward_data.default
|
HuggingFace/BlenderbotSmallForCausalLM
|
((T([1024, 128, 128], f16), T([1024, 128, 128], f16), -1, f16), {})
|
aten._softmax_backward_data.default
|
HuggingFace/BlenderbotSmallForConditionalGeneration
|
((T([1024, 128, 128], f16), T([1024, 128, 128], f16), -1, f16), {})
|
aten._softmax_backward_data.default
|
TIMM/eca_halonext26ts
|
((T([1024, 4, 16, 144], f16), T([1024, 4, 16, 144], f16), -1, f16), {})
|
aten._softmax_backward_data.default
|
TIMM/eca_halonext26ts
|
((T([1024, 4, 64, 144], f16), T([1024, 4, 64, 144], f16), -1, f16), {})
|
aten._softmax_backward_data.default
|
TIMM/swin_base_patch4_window7_224
|
((T([1024, 8, 49, 49], f16), T([1024, 8, 49, 49], f16), -1, f16), {})
|
aten._softmax_backward_data.default
|
TIMM/tnt_s_patch16_224
|
((T([12544, 4, 16, 16], f16), T([12544, 4, 16, 16], f16), -1, f16), {})
|
aten._softmax_backward_data.default
|
TIMM/levit_128
|
((T([128, 12, 16, 16], f16), T([128, 12, 16, 16], f16), -1, f16), {})
|
aten._softmax_backward_data.default
|
HuggingFace/MBartForConditionalGeneration
|
((T([128, 128, 128], f16), T([128, 128, 128], f16), -1, f16), {})
|
aten._softmax_backward_data.default
|
HuggingFace/PegasusForCausalLM
|
((T([128, 128, 128], f16), T([128, 128, 128], f16), -1, f16), {})
|
aten._softmax_backward_data.default
|
HuggingFace/TrOCRForCausalLM
|
((T([128, 128, 128], f16), T([128, 128, 128], f16), -1, f16), {})
|
aten._softmax_backward_data.default
|
TIMM/levit_128
|
((T([128, 16, 16, 49], f16), T([128, 16, 16, 49], f16), -1, f16), {})
|
aten._softmax_backward_data.default
|
TIMM/levit_128
|
((T([128, 4, 196, 196], f16), T([128, 4, 196, 196], f16), -1, f16), {})
|
aten._softmax_backward_data.default
|
TIMM/visformer_small
|
((T([128, 6, 196, 196], f16), T([128, 6, 196, 196], f16), -1, f16), {})
|
aten._softmax_backward_data.default
|
TIMM/visformer_small
|
((T([128, 6, 49, 49], f16), T([128, 6, 49, 49], f16), -1, f16), {})
|
aten._softmax_backward_data.default
|
TIMM/coat_lite_mini
|
((T([128, 8, 197, 40], f16, stride=(63040, 7880, 1, 197)), T([128, 8, 197, 40], f16), 2, f16), {})
|
aten._softmax_backward_data.default
|
TIMM/coat_lite_mini
|
((T([128, 8, 3137, 8], f16, stride=(200768, 25096, 1, 3137)), T([128, 8, 3137, 8], f16), 2, f16), {})
|
aten._softmax_backward_data.default
|
TIMM/levit_128
|
((T([128, 8, 49, 196], f16), T([128, 8, 49, 196], f16), -1, f16), {})
|
aten._softmax_backward_data.default
|
TIMM/levit_128
|
((T([128, 8, 49, 49], f16), T([128, 8, 49, 49], f16), -1, f16), {})
|
aten._softmax_backward_data.default
|
TIMM/coat_lite_mini
|
((T([128, 8, 50, 64], f16, stride=(25600, 3200, 1, 50)), T([128, 8, 50, 64], f16), 2, f16), {})
|
aten._softmax_backward_data.default
|
TIMM/coat_lite_mini
|
((T([128, 8, 785, 16], f16, stride=(100480, 12560, 1, 785)), T([128, 8, 785, 16], f16), 2, f16), {})
|
aten._softmax_backward_data.default
|
HuggingFace/DistilBertForMaskedLM
|
((T([16, 12, 128, 128], f16), T([16, 12, 128, 128], f16), -1, f16), {})
|
aten._softmax_backward_data.default
|
TorchBench/BERT_pytorch
|
((T([16, 12, 128, 128], f16), T([16, 12, 128, 128], f16), -1, f16), {})
|
aten._softmax_backward_data.default
|
HuggingFace/LayoutLMForMaskedLM
|
((T([16, 12, 512, 512], f16), T([16, 12, 512, 512], f16), -1, f16), {})
|
aten._softmax_backward_data.default
|
HuggingFace/LayoutLMForSequenceClassification
|
((T([16, 12, 512, 512], f16), T([16, 12, 512, 512], f16), -1, f16), {})
|
aten._softmax_backward_data.default
|
HuggingFace/MobileBertForMaskedLM
|
((T([16, 4, 128, 128], f16), T([16, 4, 128, 128], f16), -1, f16), {})
|
aten._softmax_backward_data.default
|
HuggingFace/PLBartForCausalLM
|
((T([192, 128, 128], f16), T([192, 128, 128], f16), -1, f16), {})
|
aten._softmax_backward_data.default
|
TorchBench/hf_Longformer
|
((T([2, 1024, 12, 513], f32), T([2, 1024, 12, 513], f32), -1, f16), {})
|
aten._softmax_backward_data.default
|
TorchBench/hf_BigBird
|
((T([2, 12, 12, 64, 512], f16), T([2, 12, 12, 64, 512], f16), -1, f16), {})
|
aten._softmax_backward_data.default
|
TorchBench/hf_BigBird
|
((T([2, 12, 64, 1024], f16), T([2, 12, 64, 1024], f16), -1, f16), {})
|
aten._softmax_backward_data.default
|
TorchBench/hf_BigBird
|
((T([2, 12, 64, 448], f16), T([2, 12, 64, 448], f16), -1, f16), {})
|
aten._softmax_backward_data.default
|
TIMM/cait_m36_384
|
((T([2, 16, 1, 577], f16), T([2, 16, 1, 577], f16), -1, f16), {})
|
aten._softmax_backward_data.default
|
HuggingFace/MegatronBertForCausalLM
|
((T([2, 16, 128, 128], f16), T([2, 16, 128, 128], f16), -1, f16), {})
|
aten._softmax_backward_data.default
|
TIMM/cait_m36_384
|
((T([2, 16, 576, 576], f16, stride=(5308416, 1, 9216, 16)), T([2, 16, 576, 576], f16), -1, f16), {})
|
aten._softmax_backward_data.default
|
HuggingFace/AlbertForMaskedLM
|
((T([2, 64, 512, 512], f16), T([2, 64, 512, 512], f16), -1, f16), {})
|
aten._softmax_backward_data.default
|
HuggingFace/AlbertForQuestionAnswering
|
((T([2, 64, 512, 512], f16), T([2, 64, 512, 512], f16), -1, f16), {})
|
aten._softmax_backward_data.default
|
TIMM/sebotnet33ts_256
|
((T([256, 1024, 1024], f16), T([256, 1024, 1024], f16), -1, f16), {})
|
aten._softmax_backward_data.default
|
HuggingFace/MBartForCausalLM
|
((T([256, 128, 128], f16), T([256, 128, 128], f16), -1, f16), {})
|
aten._softmax_backward_data.default
|
HuggingFace/Speech2Text2ForCausalLM
|
((T([256, 128, 128], f16), T([256, 128, 128], f16), -1, f16), {})
|
aten._softmax_backward_data.default
|
TIMM/swin_base_patch4_window7_224
|
((T([256, 16, 49, 49], f16), T([256, 16, 49, 49], f16), -1, f16), {})
|
aten._softmax_backward_data.default
|
TIMM/sebotnet33ts_256
|
((T([256, 256, 256], f16), T([256, 256, 256], f16), -1, f16), {})
|
aten._softmax_backward_data.default
|
TIMM/mobilevit_s
|
((T([256, 4, 16, 16], f16), T([256, 4, 16, 16], f16), -1, f16), {})
|
aten._softmax_backward_data.default
|
TIMM/mobilevit_s
|
((T([256, 4, 256, 256], f16), T([256, 4, 256, 256], f16), -1, f16), {})
|
aten._softmax_backward_data.default
|
TIMM/mobilevit_s
|
((T([256, 4, 64, 64], f16), T([256, 4, 64, 64], f16), -1, f16), {})
|
aten._softmax_backward_data.default
|
TIMM/sebotnet33ts_256
|
((T([256, 64, 64], f16), T([256, 64, 64], f16), -1, f16), {})
|
aten._softmax_backward_data.default
|
TorchBench/attention_is_all_you_need_pytorch
|
((T([256, 8, 31, 31], f16), T([256, 8, 31, 31], f16), -1, f16), {})
|
aten._softmax_backward_data.default
|
TorchBench/attention_is_all_you_need_pytorch
|
((T([256, 8, 31, 33], f16), T([256, 8, 31, 33], f16), -1, f16), {})
|
aten._softmax_backward_data.default
|
TorchBench/attention_is_all_you_need_pytorch
|
((T([256, 8, 33, 33], f16), T([256, 8, 33, 33], f16), -1, f16), {})
|
aten._softmax_backward_data.default
|
HuggingFace/YituTechConvBert
|
((T([3072, 9, 1], f16), T([3072, 9, 1], f16), 1, f16), {})
|
aten._softmax_backward_data.default
|
TIMM/twins_pcpvt_base
|
((T([32, 1, 3136, 49], f16), T([32, 1, 3136, 49], f16), -1, f16), {})
|
aten._softmax_backward_data.default
|
HuggingFace/BartForConditionalGeneration
|
((T([32, 1024, 1024], f16), T([32, 1024, 1024], f16), -1, f16), {})
|
aten._softmax_backward_data.default
|
HuggingFace/DistilBertForQuestionAnswering
|
((T([32, 12, 128, 128], f16), T([32, 12, 128, 128], f16), -1, f16), {})
|
aten._softmax_backward_data.default
|
HuggingFace/M2M100ForConditionalGeneration
|
((T([32, 128, 128], f16), T([32, 128, 128], f16), -1, f16), {})
|
aten._softmax_backward_data.default
|
HuggingFace/XGLMForCausalLM
|
((T([32, 128, 128], f16), T([32, 128, 128], f16), -1, f16), {})
|
aten._softmax_backward_data.default
|
TIMM/resnest101e
|
((T([32, 2, 1, 128], f16), T([32, 2, 1, 128], f16), 1, f16), {})
|
aten._softmax_backward_data.default
|
TorchBench/timm_resnest
|
((T([32, 2, 1, 128], f16), T([32, 2, 1, 128], f16), 1, f16), {})
|
aten._softmax_backward_data.default
|
TIMM/resnest101e
|
((T([32, 2, 1, 256], f16), T([32, 2, 1, 256], f16), 1, f16), {})
|
aten._softmax_backward_data.default
|
TorchBench/timm_resnest
|
((T([32, 2, 1, 256], f16), T([32, 2, 1, 256], f16), 1, f16), {})
|
aten._softmax_backward_data.default
|
TIMM/resnest101e
|
((T([32, 2, 1, 512], f16), T([32, 2, 1, 512], f16), 1, f16), {})
|
aten._softmax_backward_data.default
|
TorchBench/timm_resnest
|
((T([32, 2, 1, 512], f16), T([32, 2, 1, 512], f16), 1, f16), {})
|
aten._softmax_backward_data.default
|
TIMM/resnest101e
|
((T([32, 2, 1, 64], f16), T([32, 2, 1, 64], f16), 1, f16), {})
|
aten._softmax_backward_data.default
|
TorchBench/timm_resnest
|
((T([32, 2, 1, 64], f16), T([32, 2, 1, 64], f16), 1, f16), {})
|
aten._softmax_backward_data.default
|
TIMM/twins_pcpvt_base
|
((T([32, 2, 784, 49], f16), T([32, 2, 784, 49], f16), -1, f16), {})
|
aten._softmax_backward_data.default
|
HuggingFace/MobileBertForQuestionAnswering
|
((T([32, 4, 128, 128], f16), T([32, 4, 128, 128], f16), -1, f16), {})
|
aten._softmax_backward_data.default
|
TIMM/twins_pcpvt_base
|
((T([32, 5, 196, 49], f16), T([32, 5, 196, 49], f16), -1, f16), {})
|
aten._softmax_backward_data.default
|
TIMM/twins_pcpvt_base
|
((T([32, 8, 49, 49], f16), T([32, 8, 49, 49], f16), -1, f16), {})
|
aten._softmax_backward_data.default
|
HuggingFace/GPT2ForSequenceClassification
|
((T([4, 12, 1024, 1024], f16), T([4, 12, 1024, 1024], f16), -1, f16), {})
|
aten._softmax_backward_data.default
|
HuggingFace/RobertaForCausalLM
|
((T([4, 12, 128, 128], f16), T([4, 12, 128, 128], f16), -1, f16), {})
|
<!-- Trailing dataset-viewer UI text, not part of the operator table:
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.
-->