operator name | used in model | args
---|---|---
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54765], i64), T([54765, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None})
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54766], i64), T([54766, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None})
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54767], i64), T([54767, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None})
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54768], i64), T([54768, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None})
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54769], i64), T([54769, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None})
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54770], i64), T([54770, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None})
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54771], i64), T([54771, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None})
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54772], i64), T([54772, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None})
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54773], i64), T([54773, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None})
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54774], i64), T([54774, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None})
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54775], i64), T([54775, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None})
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54776], i64), T([54776, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None})
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54777], i64), T([54777, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None})
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54778], i64), T([54778, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None})
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54779], i64), T([54779, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None})
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54780], i64), T([54780, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None})
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54781], i64), T([54781, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None})
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54782], i64), T([54782, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None})
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54783], i64), T([54783, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None})
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54784], i64), T([54784, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None})
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54785], i64), T([54785, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None})
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54786], i64), T([54786, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None})
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54787], i64), T([54787, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None})
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54788], i64), T([54788, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None})
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54789], i64), T([54789, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None})
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54791], i64), T([54791, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None})
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54792], i64), T([54792, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None})
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54793], i64), T([54793, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None})
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54794], i64), T([54794, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None})
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54795], i64), T([54795, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None})
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54796], i64), T([54796, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None})
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54797], i64), T([54797, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None})
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54798], i64), T([54798, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None})
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54799], i64), T([54799, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None})
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54800], i64), T([54800, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None})
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54801], i64), T([54801, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None})
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54802], i64), T([54802, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None})
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54803], i64), T([54803, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None})
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54804], i64), T([54804, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None})
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54807], i64), T([54807, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None})
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54809], i64), T([54809, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None})
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54811], i64), T([54811, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None})
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54812], i64), T([54812, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None})
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54813], i64), T([54813, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None})
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54814], i64), T([54814, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None})
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54815], i64), T([54815, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None})
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54816], i64), T([54816, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None})
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54818], i64), T([54818, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None})
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54819], i64), T([54819, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None})
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54820], i64), T([54820, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None})
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54821], i64), T([54821, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None})
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54824], i64), T([54824, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None})
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54826], i64), T([54826, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None})
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54827], i64), T([54827, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None})
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54828], i64), T([54828, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None})
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54829], i64), T([54829, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None})
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54830], i64), T([54830, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None})
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54831], i64), T([54831, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None})
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54833], i64), T([54833, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None})
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54835], i64), T([54835, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None})
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54836], i64), T([54836, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None})
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54838], i64), T([54838, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None})
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54839], i64), T([54839, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None})
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54842], i64), T([54842, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None})
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54843], i64), T([54843, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None})
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54844], i64), T([54844, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None})
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54847], i64), T([54847, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None})
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54855], i64), T([54855, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None})
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54862], i64), T([54862, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None})
aten._to_copy.default | TorchBench/vision_maskrcnn | ((T([0, 4], f16),), {'dtype': f32})
aten._to_copy.default | TorchBench/vision_maskrcnn | ((T([0, 4], f16),), {'dtype': i64})
aten._to_copy.default | TorchBench/vision_maskrcnn | ((T([0], f32),), {'dtype': i64})
aten._to_copy.default | HuggingFace/AllenaiLongformerBase | ((T([1, 1, 1, 1024], f32),), {'dtype': f16})
aten._to_copy.default | HuggingFace/BigBird | ((T([1, 1, 1, 1024], f32),), {'dtype': f16})
aten._to_copy.default | HuggingFace/CamemBert | ((T([1, 1, 1, 512], f32),), {'dtype': f16})
aten._to_copy.default | HuggingFace/ElectraForCausalLM | ((T([1, 1, 1, 512], f32),), {'dtype': f16})
aten._to_copy.default | HuggingFace/YituTechConvBert | ((T([1, 1, 1, 512], f32),), {'dtype': f16})
aten._to_copy.default | HuggingFace/GPT2ForSequenceClassification | ((T([1, 1, 1024, 1024], u8),), {'dtype': torch.bool})
aten._to_copy.default | HuggingFace/BigBird | ((T([1, 1, 1024, 1], f32),), {'dtype': f16})
aten._to_copy.default | TorchBench/yolov3 | ((T([1, 1, 12, 16, 2], i64),), {'dtype': f32})
aten._to_copy.default | HuggingFace/BigBird | ((T([1, 1, 12, 64, 192], f32),), {'dtype': f16})
aten._to_copy.default | HuggingFace/GPTNeoForCausalLM | ((T([1, 1, 128, 128], u8, stride=(4194304, 4194304, 2048, 1)),), {'dtype': torch.bool})
aten._to_copy.default | HuggingFace/GPTNeoForSequenceClassification | ((T([1, 1, 128, 128], u8, stride=(4194304, 4194304, 2048, 1)),), {'dtype': torch.bool})
aten._to_copy.default | TorchBench/yolov3 | ((T([1, 1, 24, 32, 2], i64),), {'dtype': f32})
aten._to_copy.default | TorchBench/yolov3 | ((T([1, 1, 48, 64, 2], i64),), {'dtype': f32})
aten._to_copy.default | HuggingFace/DebertaV2ForMaskedLM | ((T([1, 1, 512, 512], f32),), {'dtype': torch.uint8})
aten._to_copy.default | HuggingFace/DebertaV2ForQuestionAnswering | ((T([1, 1, 512, 512], f32),), {'dtype': torch.uint8})
aten._to_copy.default | HuggingFace/DebertaV2ForMaskedLM | ((T([1, 1, 512, 512], u8),), {'dtype': torch.bool})
aten._to_copy.default | HuggingFace/DebertaV2ForQuestionAnswering | ((T([1, 1, 512, 512], u8),), {'dtype': torch.bool})
aten._to_copy.default | HuggingFace/DistillGPT2 | ((T([1, 1, 512, 512], u8, stride=(1048576, 1048576, 1024, 1)),), {'dtype': torch.bool})
aten._to_copy.default | TorchBench/hf_GPT2 | ((T([1, 1, 512, 512], u8, stride=(1048576, 1048576, 1024, 1)),), {'dtype': torch.bool})
aten._to_copy.default | HuggingFace/AllenaiLongformerBase | ((T([1, 1024, 1, 1], b8),), {'dtype': f16, 'layout': torch.strided, 'device': 'cuda'})
aten._to_copy.default | HuggingFace/AllenaiLongformerBase | ((T([1, 1024, 12, 513], f16, stride=(6303744, 513, 525312, 1)),), {'dtype': f32, 'layout': torch.strided, 'device': 'cuda'})
aten._to_copy.default | HuggingFace/AllenaiLongformerBase | ((T([1, 1024, 12, 513], f32),), {'dtype': f16, 'layout': torch.strided, 'device': 'cuda'})
aten._to_copy.default | HuggingFace/AllenaiLongformerBase | ((T([1, 1024], b8),), {'dtype': i32})
aten._to_copy.default | HuggingFace/AllenaiLongformerBase | ((T([1, 1024], i32),), {'dtype': i64})
aten._to_copy.default | HuggingFace/AllenaiLongformerBase | ((T([1, 1024], i64),), {'dtype': i32, 'layout': torch.strided, 'device': 'cuda'})
aten._to_copy.default | HuggingFace/GPTNeoForCausalLM | ((T([1, 128, 50257], f16),), {'dtype': f32})
aten._to_copy.default | HuggingFace/GPTNeoForCausalLM | ((T([1, 128, 50257], f32),), {'dtype': f16, 'layout': torch.strided, 'device': 'cuda'})
aten._to_copy.default | HuggingFace/GPTNeoForCausalLM | ((T([1, 128, 50257], f32),), {'dtype': f16})
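
Each row can be replayed as a direct ATen call. Below is a minimal sketch for one of the `aten._sparse_coo_tensor_with_dims_and_tensors.default` rows, assuming the `T([shape], dtype)` shorthand in the `args` column denotes a tensor of that shape and dtype; the `T` helper here is a hypothetical stand-in that fills tensors with random data, not part of the dataset.

```python
import torch

# Hypothetical stand-in for the T([shape], dtype) shorthand in the args column:
# assumed to mean "a tensor with this shape and dtype" (contents are random here).
def T(shape, dtype, device):
    if dtype.is_floating_point:
        return torch.randn(shape, device=device).to(dtype)
    # Integer tensors in these rows are COO indices, so keep them in range
    # for the sparse dimension of size 965.
    return torch.randint(0, 965, shape, dtype=dtype, device=device)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Row: ((1, 1, [965, 192], T([1, 54765], i64), T([54765, 192], f16)),
#       {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None})
indices = T([1, 54765], torch.int64, device)     # sparse_dim = 1, nnz = 54765
values = T([54765, 192], torch.float16, device)  # dense_dim = 1, dense size 192

out = torch.ops.aten._sparse_coo_tensor_with_dims_and_tensors.default(
    1, 1, [965, 192], indices, values,
    dtype=torch.float16, layout=torch.sparse_coo, device=device, pin_memory=None,
)
print(out.shape, out.layout, out._nnz())  # torch.Size([965, 192]) torch.sparse_coo 54765
```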
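A similar sketch for the `aten._to_copy.default` rows, again treating `T(...)` as a hypothetical tensor-construction shorthand; the `stride=(...)` variants are assumed to record non-contiguous inputs, which `as_strided` over a large enough base buffer can approximate.

```python
import torch

# Row: ((T([1, 1, 1, 1024], f32),), {'dtype': f16}) -- HuggingFace/AllenaiLongformerBase
x = torch.randn(1, 1, 1, 1024, dtype=torch.float32)
y = torch.ops.aten._to_copy.default(x, dtype=torch.float16)
print(y.dtype, y.shape)  # torch.float16 torch.Size([1, 1, 1, 1024])

# Rows like T([1, 1, 128, 128], u8, stride=(4194304, 4194304, 2048, 1)) appear to
# describe a strided view; one way to rebuild such an input is via as_strided.
base = torch.zeros(4194304, dtype=torch.uint8)
mask_u8 = base.as_strided((1, 1, 128, 128), (4194304, 4194304, 2048, 1))
mask_bool = torch.ops.aten._to_copy.default(mask_u8, dtype=torch.bool)
print(mask_bool.dtype, mask_bool.shape)  # torch.bool torch.Size([1, 1, 128, 128])
```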