Adding ONNX file of this model

#3
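The diff below only touches the README metadata (tags, `pipeline_tag`, and the MTEB `model-index` layout); the ONNX export itself is added as separate files in this PR. As a minimal, hedged sketch of how such an export could be loaded once merged — assuming the file is uploaded as `model.onnx`, that the graph takes the tokenizer's `input_ids`/`attention_mask`, and that CLS pooling matches the model card (all assumptions, not stated in this PR):

```python
# Hedged usage sketch for the ONNX export added by this PR.
# Assumptions (not stated in the PR): the file is named model.onnx, the graph
# consumes the tokenizer's input_ids/attention_mask, and CLS pooling plus L2
# normalization matches the model card.
import numpy as np
import onnxruntime as ort
from transformers import AutoTokenizer

model_id = "ibm-granite/granite-embedding-30m-english"
tokenizer = AutoTokenizer.from_pretrained(model_id)
session = ort.InferenceSession("model.onnx")  # path to the file from this PR

batch = tokenizer(
    ["IBM Granite embedding models are lightweight English encoders."],
    padding=True,
    truncation=True,
    return_tensors="np",
)
# Feed only the inputs that the exported graph actually declares.
feed = {inp.name: batch[inp.name] for inp in session.get_inputs() if inp.name in batch}
last_hidden_state = session.run(None, feed)[0]

# CLS-token pooling and L2 normalization (verify against the model card).
embeddings = last_hidden_state[:, 0]
embeddings /= np.linalg.norm(embeddings, axis=1, keepdims=True)
print(embeddings.shape)
```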
README.md CHANGED
@@ -9,15 +9,19 @@ tags:
 - embeddings
 - mteb
 - transformers
+- onnx
+pipeline_tag: sentence-similarity
 model-index:
 - name: ibm-granite/granite-embedding-30m-english
   results:
-  - dataset:
-      config: en-ext
+  - task:
+      type: Classification
+    dataset:
       name: MTEB AmazonCounterfactualClassification (en-ext)
-      revision: e8379541af4e31359cca9fbcf4b00f2671dba205
-      split: test
       type: mteb/amazon_counterfactual
+      config: en-ext
+      split: test
+      revision: e8379541af4e31359cca9fbcf4b00f2671dba205
     metrics:
     - type: accuracy
       value: 62.856100000000005
@@ -31,14 +35,14 @@ model-index:
       value: 15.4995
     - type: main_score
       value: 62.856100000000005
-    task:
+  - task:
       type: Classification
-  - dataset:
-      config: en
+    dataset:
       name: MTEB AmazonCounterfactualClassification (en)
-      revision: e8379541af4e31359cca9fbcf4b00f2671dba205
-      split: test
       type: mteb/amazon_counterfactual
+      config: en
+      split: test
+      revision: e8379541af4e31359cca9fbcf4b00f2671dba205
     metrics:
     - type: accuracy
       value: 60.925399999999996
@@ -52,14 +56,14 @@ model-index:
       value: 25.0517
     - type: main_score
       value: 60.925399999999996
-    task:
+  - task:
       type: Classification
-  - dataset:
-      config: default
+    dataset:
       name: MTEB AmazonPolarityClassification (default)
-      revision: e2d317d38cd51312af73b3d32a06d1a08b442046
-      split: test
       type: mteb/amazon_polarity
+      config: default
+      split: test
+      revision: e2d317d38cd51312af73b3d32a06d1a08b442046
     metrics:
     - type: accuracy
       value: 62.983599999999996
@@ -73,14 +77,14 @@ model-index:
       value: 58.3423
     - type: main_score
       value: 62.983599999999996
-    task:
+  - task:
       type: Classification
-  - dataset:
-      config: en
+    dataset:
       name: MTEB AmazonReviewsClassification (en)
-      revision: 1399c76144fd37290681b995c656ef9b2e06e26d
-      split: test
       type: mteb/amazon_reviews_multi
+      config: en
+      split: test
+      revision: 1399c76144fd37290681b995c656ef9b2e06e26d
     metrics:
     - type: accuracy
       value: 32.178000000000004
@@ -90,14 +94,14 @@ model-index:
       value: 31.5201
     - type: main_score
       value: 32.178000000000004
-    task:
-      type: Classification
-  - dataset:
-      config: default
+  - task:
+      type: Retrieval
+    dataset:
       name: MTEB AppsRetrieval (default)
-      revision: f22508f96b7a36c2415181ed8bb76f76e04ae2d5
-      split: test
       type: CoIR-Retrieval/apps
+      config: default
+      split: test
+      revision: f22508f96b7a36c2415181ed8bb76f76e04ae2d5
     metrics:
     - type: ndcg_at_1
       value: 3.5060000000000002
@@ -381,14 +385,14 @@ model-index:
381
  value: 29.7236
382
  - type: main_score
383
  value: 6.203
384
- task:
385
  type: Retrieval
386
- - dataset:
387
- config: default
388
  name: MTEB ArguAna (default)
389
- revision: c22ab2a51041ffd869aaddef7af8d8215647e41a
390
- split: test
391
  type: mteb/arguana
 
 
 
392
  metrics:
393
  - type: ndcg_at_1
394
  value: 31.791999999999998
@@ -672,14 +676,14 @@ model-index:
672
  value: 11.3251
673
  - type: main_score
674
  value: 56.355999999999995
675
- task:
676
- type: Retrieval
677
- - dataset:
678
- config: default
679
  name: MTEB ArxivClusteringP2P (default)
680
- revision: a122ad7f3f0291bf49cc6f4d32aa80929df69d5d
681
- split: test
682
  type: mteb/arxiv-clustering-p2p
 
 
 
683
  metrics:
684
  - type: v_measure
685
  value: 46.813
@@ -687,14 +691,14 @@ model-index:
687
  value: 13.830899999999998
688
  - type: main_score
689
  value: 46.813
690
- task:
691
  type: Clustering
692
- - dataset:
693
- config: default
694
  name: MTEB ArxivClusteringS2S (default)
695
- revision: f910caf1a6075f7329cdf8c1a6135696f37dbd53
696
- split: test
697
  type: mteb/arxiv-clustering-s2s
 
 
 
698
  metrics:
699
  - type: v_measure
700
  value: 41.9895
@@ -702,14 +706,14 @@ model-index:
702
  value: 14.3004
703
  - type: main_score
704
  value: 41.9895
705
- task:
706
- type: Clustering
707
- - dataset:
708
- config: default
709
  name: MTEB AskUbuntuDupQuestions (default)
710
- revision: 2000358ca161889fa9c082cb41daa8dcfb161a54
711
- split: test
712
  type: mteb/askubuntudupquestions-reranking
 
 
 
713
  metrics:
714
  - type: map
715
  value: 64.1329
@@ -729,14 +733,14 @@ model-index:
729
  value: 23.508699999999997
730
  - type: main_score
731
  value: 64.1329
732
- task:
733
- type: Reranking
734
- - dataset:
735
- config: default
736
  name: MTEB BIOSSES (default)
737
- revision: d3fb88f8f02e40887cd149695127462bbcf29b4a
738
- split: test
739
  type: mteb/biosses-sts
 
 
 
740
  metrics:
741
  - type: pearson
742
  value: 90.2058
@@ -756,14 +760,14 @@ model-index:
756
  value: 88.1641
757
  - type: main_score
758
  value: 88.1641
759
- task:
760
- type: STS
761
- - dataset:
762
- config: default
763
  name: MTEB Banking77Classification (default)
764
- revision: 0fd18e25b25c072e09e0d92ab615fda904d66300
765
- split: test
766
  type: mteb/banking77
 
 
 
767
  metrics:
768
  - type: accuracy
769
  value: 77.3247
@@ -773,14 +777,14 @@ model-index:
773
  value: 76.3532
774
  - type: main_score
775
  value: 77.3247
776
- task:
777
- type: Classification
778
- - dataset:
779
- config: default
780
  name: MTEB BiorxivClusteringP2P (default)
781
- revision: 65b79d1d13f80053f67aca9498d9402c2d9f1f40
782
- split: test
783
  type: mteb/biorxiv-clustering-p2p
 
 
 
784
  metrics:
785
  - type: v_measure
786
  value: 39.018
@@ -788,14 +792,14 @@ model-index:
788
  value: 0.7512
789
  - type: main_score
790
  value: 39.018
791
- task:
792
  type: Clustering
793
- - dataset:
794
- config: default
795
  name: MTEB BiorxivClusteringS2S (default)
796
- revision: 258694dd0231531bc1fd9de6ceb52a0853c6d908
797
- split: test
798
  type: mteb/biorxiv-clustering-s2s
 
 
 
799
  metrics:
800
  - type: v_measure
801
  value: 36.8097
@@ -803,14 +807,14 @@ model-index:
803
  value: 0.9368
804
  - type: main_score
805
  value: 36.8097
806
- task:
807
- type: Clustering
808
- - dataset:
809
- config: python
810
  name: MTEB COIRCodeSearchNetRetrieval (python)
811
- revision: 4adc7bc41202b5c13543c9c886a25f340634dab3
812
- split: test
813
  type: CoIR-Retrieval/CodeSearchNet
 
 
 
814
  metrics:
815
  - type: ndcg_at_1
816
  value: 85.353
@@ -1094,14 +1098,14 @@ model-index:
1094
  value: 88.457
1095
  - type: main_score
1096
  value: 90.89699999999999
1097
- task:
1098
  type: Retrieval
1099
- - dataset:
1100
- config: javascript
1101
  name: MTEB COIRCodeSearchNetRetrieval (javascript)
1102
- revision: 4adc7bc41202b5c13543c9c886a25f340634dab3
1103
- split: test
1104
  type: CoIR-Retrieval/CodeSearchNet
 
 
 
1105
  metrics:
1106
  - type: ndcg_at_1
1107
  value: 35.46
@@ -1385,14 +1389,14 @@ model-index:
1385
  value: 58.6477
1386
  - type: main_score
1387
  value: 46.54
1388
- task:
1389
  type: Retrieval
1390
- - dataset:
1391
- config: go
1392
  name: MTEB COIRCodeSearchNetRetrieval (go)
1393
- revision: 4adc7bc41202b5c13543c9c886a25f340634dab3
1394
- split: test
1395
  type: CoIR-Retrieval/CodeSearchNet
 
 
 
1396
  metrics:
1397
  - type: ndcg_at_1
1398
  value: 45.728
@@ -1676,14 +1680,14 @@ model-index:
1676
  value: 58.7689
1677
  - type: main_score
1678
  value: 59.471
1679
- task:
1680
  type: Retrieval
1681
- - dataset:
1682
- config: ruby
1683
  name: MTEB COIRCodeSearchNetRetrieval (ruby)
1684
- revision: 4adc7bc41202b5c13543c9c886a25f340634dab3
1685
- split: test
1686
  type: CoIR-Retrieval/CodeSearchNet
 
 
 
1687
  metrics:
1688
  - type: ndcg_at_1
1689
  value: 38.144
@@ -1967,14 +1971,14 @@ model-index:
1967
  value: 52.5296
1968
  - type: main_score
1969
  value: 50.166
1970
- task:
1971
  type: Retrieval
1972
- - dataset:
1973
- config: java
1974
  name: MTEB COIRCodeSearchNetRetrieval (java)
1975
- revision: 4adc7bc41202b5c13543c9c886a25f340634dab3
1976
- split: test
1977
  type: CoIR-Retrieval/CodeSearchNet
 
 
 
1978
  metrics:
1979
  - type: ndcg_at_1
1980
  value: 42.355
@@ -2258,14 +2262,14 @@ model-index:
2258
  value: 57.312
2259
  - type: main_score
2260
  value: 55.062
2261
- task:
2262
  type: Retrieval
2263
- - dataset:
2264
- config: php
2265
  name: MTEB COIRCodeSearchNetRetrieval (php)
2266
- revision: 4adc7bc41202b5c13543c9c886a25f340634dab3
2267
- split: test
2268
  type: CoIR-Retrieval/CodeSearchNet
 
 
 
2269
  metrics:
2270
  - type: ndcg_at_1
2271
  value: 36.835
@@ -2549,14 +2553,14 @@ model-index:
2549
  value: 53.6044
2550
  - type: main_score
2551
  value: 49.784
2552
- task:
2553
  type: Retrieval
2554
- - dataset:
2555
- config: default
2556
  name: MTEB CQADupstackAndroidRetrieval (default)
2557
- revision: f46a197baaae43b4f621051089b82a364682dfeb
2558
- split: test
2559
  type: mteb/cqadupstack-android
 
 
 
2560
  metrics:
2561
  - type: ndcg_at_1
2562
  value: 44.206
@@ -2840,14 +2844,14 @@ model-index:
2840
  value: 48.1413
2841
  - type: main_score
2842
  value: 54.106
2843
- task:
2844
  type: Retrieval
2845
- - dataset:
2846
- config: default
2847
  name: MTEB CQADupstackEnglishRetrieval (default)
2848
- revision: ad9991cb51e31e31e430383c75ffb2885547b5f0
2849
- split: test
2850
  type: mteb/cqadupstack-english
 
 
 
2851
  metrics:
2852
  - type: ndcg_at_1
2853
  value: 41.274
@@ -3131,14 +3135,14 @@ model-index:
3131
  value: 47.7803
3132
  - type: main_score
3133
  value: 50.251000000000005
3134
- task:
3135
  type: Retrieval
3136
- - dataset:
3137
- config: default
3138
  name: MTEB CQADupstackGamingRetrieval (default)
3139
- revision: 4885aa143210c98657558c04aaf3dc47cfb54340
3140
- split: test
3141
  type: mteb/cqadupstack-gaming
 
 
 
3142
  metrics:
3143
  - type: ndcg_at_1
3144
  value: 47.147
@@ -3422,14 +3426,14 @@ model-index:
3422
  value: 49.977
3423
  - type: main_score
3424
  value: 59.318000000000005
3425
- task:
3426
  type: Retrieval
3427
- - dataset:
3428
- config: default
3429
  name: MTEB CQADupstackGisRetrieval (default)
3430
- revision: 5003b3064772da1887988e05400cf3806fe491f2
3431
- split: test
3432
  type: mteb/cqadupstack-gis
 
 
 
3433
  metrics:
3434
  - type: ndcg_at_1
3435
  value: 30.734
@@ -3713,14 +3717,14 @@ model-index:
3713
  value: 41.652899999999995
3714
  - type: main_score
3715
  value: 43.564
3716
- task:
3717
  type: Retrieval
3718
- - dataset:
3719
- config: default
3720
  name: MTEB CQADupstackMathematicaRetrieval (default)
3721
- revision: 90fceea13679c63fe563ded68f3b6f06e50061de
3722
- split: test
3723
  type: mteb/cqadupstack-mathematica
 
 
 
3724
  metrics:
3725
  - type: ndcg_at_1
3726
  value: 22.886
@@ -4004,14 +4008,14 @@ model-index:
4004
  value: 29.7639
4005
  - type: main_score
4006
  value: 32.749
4007
- task:
4008
  type: Retrieval
4009
- - dataset:
4010
- config: default
4011
  name: MTEB CQADupstackPhysicsRetrieval (default)
4012
- revision: 79531abbd1fb92d06c6d6315a0cbbbf5bb247ea4
4013
- split: test
4014
  type: mteb/cqadupstack-physics
 
 
 
4015
  metrics:
4016
  - type: ndcg_at_1
4017
  value: 38.114
@@ -4295,14 +4299,14 @@ model-index:
4295
  value: 52.337500000000006
4296
  - type: main_score
4297
  value: 48.339999999999996
4298
- task:
4299
  type: Retrieval
4300
- - dataset:
4301
- config: default
4302
  name: MTEB CQADupstackProgrammersRetrieval (default)
4303
- revision: 6184bc1440d2dbc7612be22b50686b8826d22b32
4304
- split: test
4305
  type: mteb/cqadupstack-programmers
 
 
 
4306
  metrics:
4307
  - type: ndcg_at_1
4308
  value: 34.247
@@ -4586,14 +4590,14 @@ model-index:
4586
  value: 44.7656
4587
  - type: main_score
4588
  value: 44.065
4589
- task:
4590
  type: Retrieval
4591
- - dataset:
4592
- config: default
4593
  name: MTEB CQADupstackRetrieval (default)
4594
- revision: 160c094312a0e1facb97e55eeddb698c0abe3571
4595
- split: test
4596
  type: CQADupstackRetrieval_is_a_combined_dataset
 
 
 
4597
  metrics:
4598
  - type: ndcg_at_1
4599
  value: 33.917750000000005
@@ -4877,27 +4881,27 @@ model-index:
4877
  value: 45.21384166666667
4878
  - type: main_score
4879
  value: 44.29191666666667
4880
- task:
4881
  type: Retrieval
4882
- - dataset:
4883
- config: default
4884
  name: MTEB CQADupstackRetrieval (default)
4885
- revision: CQADupstackRetrieval_is_a_combined_dataset
4886
- split: test
4887
  type: CQADupstackRetrieval_is_a_combined_dataset
 
 
 
4888
  metrics:
4889
  - type: main_score
4890
  value: 44.29191666666667
4891
  - type: ndcg_at_10
4892
  value: 44.29191666666667
4893
- task:
4894
  type: Retrieval
4895
- - dataset:
4896
- config: default
4897
  name: MTEB CQADupstackStatsRetrieval (default)
4898
- revision: 65ac3a16b8e91f9cee4c9828cc7c335575432a2a
4899
- split: test
4900
  type: mteb/cqadupstack-stats
 
 
 
4901
  metrics:
4902
  - type: ndcg_at_1
4903
  value: 29.141000000000002
@@ -5181,14 +5185,14 @@ model-index:
5181
  value: 51.8107
5182
  - type: main_score
5183
  value: 38.596000000000004
5184
- task:
5185
  type: Retrieval
5186
- - dataset:
5187
- config: default
5188
  name: MTEB CQADupstackTexRetrieval (default)
5189
- revision: 46989137a86843e03a6195de44b09deda022eec7
5190
- split: test
5191
  type: mteb/cqadupstack-tex
 
 
 
5192
  metrics:
5193
  - type: ndcg_at_1
5194
  value: 24.054000000000002
@@ -5472,14 +5476,14 @@ model-index:
5472
  value: 38.0747
5473
  - type: main_score
5474
  value: 33.722
5475
- task:
5476
  type: Retrieval
5477
- - dataset:
5478
- config: default
5479
  name: MTEB CQADupstackUnixRetrieval (default)
5480
- revision: 6c6430d3a6d36f8d2a829195bc5dc94d7e063e53
5481
- split: test
5482
  type: mteb/cqadupstack-unix
 
 
 
5483
  metrics:
5484
  - type: ndcg_at_1
5485
  value: 35.168
@@ -5763,14 +5767,14 @@ model-index:
5763
  value: 47.6603
5764
  - type: main_score
5765
  value: 46.071
5766
- task:
5767
  type: Retrieval
5768
- - dataset:
5769
- config: default
5770
  name: MTEB CQADupstackWebmastersRetrieval (default)
5771
- revision: 160c094312a0e1facb97e55eeddb698c0abe3571
5772
- split: test
5773
  type: mteb/cqadupstack-webmasters
 
 
 
5774
  metrics:
5775
  - type: ndcg_at_1
5776
  value: 33.794000000000004
@@ -6054,14 +6058,14 @@ model-index:
6054
  value: 47.1571
6055
  - type: main_score
6056
  value: 43.832
6057
- task:
6058
  type: Retrieval
6059
- - dataset:
6060
- config: default
6061
  name: MTEB CQADupstackWordpressRetrieval (default)
6062
- revision: 4ffe81d471b1924886b33c7567bfb200e9eec5c4
6063
- split: test
6064
  type: mteb/cqadupstack-wordpress
 
 
 
6065
  metrics:
6066
  - type: ndcg_at_1
6067
  value: 26.247999999999998
@@ -6345,14 +6349,14 @@ model-index:
6345
  value: 43.4448
6346
  - type: main_score
6347
  value: 36.889
6348
- task:
6349
  type: Retrieval
6350
- - dataset:
6351
- config: default
6352
  name: MTEB ClimateFEVER (default)
6353
- revision: 47f2ac6acb640fc46020b02a5b59fdda04d39380
6354
- split: test
6355
  type: mteb/climate-fever
 
 
 
6356
  metrics:
6357
  - type: ndcg_at_1
6358
  value: 29.837000000000003
@@ -6636,14 +6640,14 @@ model-index:
6636
  value: 19.817
6637
  - type: main_score
6638
  value: 30.263
6639
- task:
6640
  type: Retrieval
6641
- - dataset:
6642
- config: default
6643
  name: MTEB CodeFeedbackMT (default)
6644
- revision: b0f12fa0c0dd67f59c95a5c33d02aeeb4c398c5f
6645
- split: test
6646
  type: CoIR-Retrieval/codefeedback-mt
 
 
 
6647
  metrics:
6648
  - type: ndcg_at_1
6649
  value: 27.002
@@ -6927,14 +6931,14 @@ model-index:
6927
  value: 46.646300000000004
6928
  - type: main_score
6929
  value: 37.757000000000005
6930
- task:
6931
  type: Retrieval
6932
- - dataset:
6933
- config: default
6934
  name: MTEB CodeFeedbackST (default)
6935
- revision: d213819e87aab9010628da8b73ab4eb337c89340
6936
- split: test
6937
  type: CoIR-Retrieval/codefeedback-st
 
 
 
6938
  metrics:
6939
  - type: ndcg_at_1
6940
  value: 53.335
@@ -7218,14 +7222,14 @@ model-index:
7218
  value: 66.3142
7219
  - type: main_score
7220
  value: 69.425
7221
- task:
7222
  type: Retrieval
7223
- - dataset:
7224
- config: python
7225
  name: MTEB CodeSearchNetCCRetrieval (python)
7226
- revision: 6e1effa2c03723c5fde48ee912b5ee08d4f211e8
7227
- split: test
7228
  type: CoIR-Retrieval/CodeSearchNet-ccr
 
 
 
7229
  metrics:
7230
  - type: ndcg_at_1
7231
  value: 39.395
@@ -7509,14 +7513,14 @@ model-index:
7509
  value: 52.254900000000006
7510
  - type: main_score
7511
  value: 53.593999999999994
7512
- task:
7513
  type: Retrieval
7514
- - dataset:
7515
- config: javascript
7516
  name: MTEB CodeSearchNetCCRetrieval (javascript)
7517
- revision: 6e1effa2c03723c5fde48ee912b5ee08d4f211e8
7518
- split: test
7519
  type: CoIR-Retrieval/CodeSearchNet-ccr
 
 
 
7520
  metrics:
7521
  - type: ndcg_at_1
7522
  value: 39.593
@@ -7800,14 +7804,14 @@ model-index:
7800
  value: 52.0697
7801
  - type: main_score
7802
  value: 53.1
7803
- task:
7804
  type: Retrieval
7805
- - dataset:
7806
- config: go
7807
  name: MTEB CodeSearchNetCCRetrieval (go)
7808
- revision: 6e1effa2c03723c5fde48ee912b5ee08d4f211e8
7809
- split: test
7810
  type: CoIR-Retrieval/CodeSearchNet-ccr
 
 
 
7811
  metrics:
7812
  - type: ndcg_at_1
7813
  value: 30.459999999999997
@@ -8091,14 +8095,14 @@ model-index:
8091
  value: 46.942499999999995
8092
  - type: main_score
8093
  value: 42.094
8094
- task:
8095
  type: Retrieval
8096
- - dataset:
8097
- config: ruby
8098
  name: MTEB CodeSearchNetCCRetrieval (ruby)
8099
- revision: 6e1effa2c03723c5fde48ee912b5ee08d4f211e8
8100
- split: test
8101
  type: CoIR-Retrieval/CodeSearchNet-ccr
 
 
 
8102
  metrics:
8103
  - type: ndcg_at_1
8104
  value: 37.827
@@ -8382,14 +8386,14 @@ model-index:
8382
  value: 56.0608
8383
  - type: main_score
8384
  value: 51.686
8385
- task:
8386
  type: Retrieval
8387
- - dataset:
8388
- config: java
8389
  name: MTEB CodeSearchNetCCRetrieval (java)
8390
- revision: 6e1effa2c03723c5fde48ee912b5ee08d4f211e8
8391
- split: test
8392
  type: CoIR-Retrieval/CodeSearchNet-ccr
 
 
 
8393
  metrics:
8394
  - type: ndcg_at_1
8395
  value: 39.744
@@ -8673,14 +8677,14 @@ model-index:
8673
  value: 53.286100000000005
8674
  - type: main_score
8675
  value: 52.544000000000004
8676
- task:
8677
  type: Retrieval
8678
- - dataset:
8679
- config: php
8680
  name: MTEB CodeSearchNetCCRetrieval (php)
8681
- revision: 6e1effa2c03723c5fde48ee912b5ee08d4f211e8
8682
- split: test
8683
  type: CoIR-Retrieval/CodeSearchNet-ccr
 
 
 
8684
  metrics:
8685
  - type: ndcg_at_1
8686
  value: 29.685
@@ -8964,14 +8968,14 @@ model-index:
8964
  value: 45.535199999999996
8965
  - type: main_score
8966
  value: 41.814
8967
- task:
8968
  type: Retrieval
8969
- - dataset:
8970
- config: python
8971
  name: MTEB CodeSearchNetRetrieval (python)
8972
- revision: fdc6a9e39575768c27eb8a2a5f702bf846eb4759
8973
- split: test
8974
  type: code-search-net/code_search_net
 
 
 
8975
  metrics:
8976
  - type: ndcg_at_1
8977
  value: 73.5
@@ -9255,14 +9259,14 @@ model-index:
9255
  value: 71.0712
9256
  - type: main_score
9257
  value: 84.357
9258
- task:
9259
  type: Retrieval
9260
- - dataset:
9261
- config: javascript
9262
  name: MTEB CodeSearchNetRetrieval (javascript)
9263
- revision: fdc6a9e39575768c27eb8a2a5f702bf846eb4759
9264
- split: test
9265
  type: code-search-net/code_search_net
 
 
 
9266
  metrics:
9267
  - type: ndcg_at_1
9268
  value: 59.4
@@ -9546,14 +9550,14 @@ model-index:
9546
  value: 70.6705
9547
  - type: main_score
9548
  value: 71.384
9549
- task:
9550
  type: Retrieval
9551
- - dataset:
9552
- config: go
9553
  name: MTEB CodeSearchNetRetrieval (go)
9554
- revision: fdc6a9e39575768c27eb8a2a5f702bf846eb4759
9555
- split: test
9556
  type: code-search-net/code_search_net
 
 
 
9557
  metrics:
9558
  - type: ndcg_at_1
9559
  value: 71.39999999999999
@@ -9837,14 +9841,14 @@ model-index:
9837
  value: 72.7192
9838
  - type: main_score
9839
  value: 84.922
9840
- task:
9841
  type: Retrieval
9842
- - dataset:
9843
- config: ruby
9844
  name: MTEB CodeSearchNetRetrieval (ruby)
9845
- revision: fdc6a9e39575768c27eb8a2a5f702bf846eb4759
9846
- split: test
9847
  type: code-search-net/code_search_net
 
 
 
9848
  metrics:
9849
  - type: ndcg_at_1
9850
  value: 61.9
@@ -10128,14 +10132,14 @@ model-index:
10128
  value: 69.50460000000001
10129
  - type: main_score
10130
  value: 75.274
10131
- task:
10132
  type: Retrieval
10133
- - dataset:
10134
- config: java
10135
  name: MTEB CodeSearchNetRetrieval (java)
10136
- revision: fdc6a9e39575768c27eb8a2a5f702bf846eb4759
10137
- split: test
10138
  type: code-search-net/code_search_net
 
 
 
10139
  metrics:
10140
  - type: ndcg_at_1
10141
  value: 52.6
@@ -10419,14 +10423,14 @@ model-index:
10419
  value: 58.8048
10420
  - type: main_score
10421
  value: 69.447
10422
- task:
10423
  type: Retrieval
10424
- - dataset:
10425
- config: php
10426
  name: MTEB CodeSearchNetRetrieval (php)
10427
- revision: fdc6a9e39575768c27eb8a2a5f702bf846eb4759
10428
- split: test
10429
  type: code-search-net/code_search_net
 
 
 
10430
  metrics:
10431
  - type: ndcg_at_1
10432
  value: 57.699999999999996
@@ -10710,14 +10714,14 @@ model-index:
10710
  value: 63.361000000000004
10711
  - type: main_score
10712
  value: 73.455
10713
- task:
10714
  type: Retrieval
10715
- - dataset:
10716
- config: default
10717
  name: MTEB CodeTransOceanContest (default)
10718
- revision: 20da4eb20a4b17300c0986ee148c90867a7f2a4d
10719
- split: test
10720
  type: CoIR-Retrieval/codetrans-contest
 
 
 
10721
  metrics:
10722
  - type: ndcg_at_1
10723
  value: 46.154
@@ -11001,14 +11005,14 @@ model-index:
11001
  value: 77.0939
11002
  - type: main_score
11003
  value: 57.475
11004
- task:
11005
  type: Retrieval
11006
- - dataset:
11007
- config: default
11008
  name: MTEB CodeTransOceanDL (default)
11009
- revision: 281562cb8a1265ab5c0824bfa6ddcd9b0a15618f
11010
- split: test
11011
  type: CoIR-Retrieval/codetrans-dl
 
 
 
11012
  metrics:
11013
  - type: ndcg_at_1
11014
  value: 8.889
@@ -11292,14 +11296,14 @@ model-index:
11292
  value: 18.496499999999997
11293
  - type: main_score
11294
  value: 26.888
11295
- task:
11296
  type: Retrieval
11297
- - dataset:
11298
- config: default
11299
  name: MTEB CosQA (default)
11300
- revision: bc5efb7e9d437246ce393ed19d772e08e4a79535
11301
- split: test
11302
  type: CoIR-Retrieval/cosqa
 
 
 
11303
  metrics:
11304
  - type: ndcg_at_1
11305
  value: 15.4
@@ -11583,14 +11587,14 @@ model-index:
11583
  value: 29.583
11584
  - type: main_score
11585
  value: 35.449999999999996
11586
- task:
11587
  type: Retrieval
11588
- - dataset:
11589
- config: default
11590
  name: MTEB DBPedia (default)
11591
- revision: c0f706b76e590d620bd6618b3ca8efdd34e2d659
11592
- split: test
11593
  type: mteb/dbpedia
 
 
 
11594
  metrics:
11595
  - type: ndcg_at_1
11596
  value: 51.37500000000001
@@ -11874,14 +11878,14 @@ model-index:
11874
  value: 42.4844
11875
  - type: main_score
11876
  value: 35.96
11877
- task:
11878
- type: Retrieval
11879
- - dataset:
11880
- config: default
11881
  name: MTEB EmotionClassification (default)
11882
- revision: 4f58c6b202a23cf9a4da393831edf4f9183cad37
11883
- split: test
11884
  type: mteb/emotion
 
 
 
11885
  metrics:
11886
  - type: accuracy
11887
  value: 38.795
@@ -11891,14 +11895,14 @@ model-index:
11891
  value: 40.7945
11892
  - type: main_score
11893
  value: 38.795
11894
- task:
11895
- type: Classification
11896
- - dataset:
11897
- config: default
11898
  name: MTEB FEVER (default)
11899
- revision: bea83ef9e8fb933d90a2f1d5515737465d613e12
11900
- split: test
11901
  type: mteb/fever
 
 
 
11902
  metrics:
11903
  - type: ndcg_at_1
11904
  value: 79.08800000000001
@@ -12182,14 +12186,14 @@ model-index:
12182
  value: 62.96469999999999
12183
  - type: main_score
12184
  value: 85.528
12185
- task:
12186
  type: Retrieval
12187
- - dataset:
12188
- config: default
12189
  name: MTEB FiQA2018 (default)
12190
- revision: 27a168819829fe9bcd655c2df245fb19452e8e06
12191
- split: test
12192
  type: mteb/fiqa
 
 
 
12193
  metrics:
12194
  - type: ndcg_at_1
12195
  value: 35.494
@@ -12473,14 +12477,14 @@ model-index:
12473
  value: 43.4086
12474
  - type: main_score
12475
  value: 36.851
12476
- task:
12477
  type: Retrieval
12478
- - dataset:
12479
- config: default
12480
  name: MTEB HotpotQA (default)
12481
- revision: ab518f4d6fcca38d87c25209f94beba119d02014
12482
- split: test
12483
  type: mteb/hotpotqa
 
 
 
12484
  metrics:
12485
  - type: ndcg_at_1
12486
  value: 73.531
@@ -12764,14 +12768,14 @@ model-index:
12764
  value: 66.328
12765
  - type: main_score
12766
  value: 62.918
12767
- task:
12768
- type: Retrieval
12769
- - dataset:
12770
- config: default
12771
  name: MTEB ImdbClassification (default)
12772
- revision: 3d86128a09e091d6018b6d26cad27f2739fc2db7
12773
- split: test
12774
  type: mteb/imdb
 
 
 
12775
  metrics:
12776
  - type: accuracy
12777
  value: 62.2348
@@ -12785,14 +12789,14 @@ model-index:
12785
  value: 57.750800000000005
12786
  - type: main_score
12787
  value: 62.2348
12788
- task:
12789
- type: Classification
12790
- - dataset:
12791
- config: default
12792
  name: MTEB MSMARCO (default)
12793
- revision: c5a29a104738b98a9e76336939199e264163d4a0
12794
- split: dev
12795
  type: mteb/msmarco
 
 
 
12796
  metrics:
12797
  - type: ndcg_at_1
12798
  value: 15.085999999999999
@@ -13076,14 +13080,14 @@ model-index:
13076
  value: 26.801000000000002
13077
  - type: main_score
13078
  value: 30.711
13079
- task:
13080
- type: Retrieval
13081
- - dataset:
13082
- config: en
13083
  name: MTEB MTOPDomainClassification (en)
13084
- revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf
13085
- split: test
13086
  type: mteb/mtop_domain
 
 
 
13087
  metrics:
13088
  - type: accuracy
13089
  value: 89.4505
@@ -13093,14 +13097,14 @@ model-index:
13093
  value: 89.442
13094
  - type: main_score
13095
  value: 89.4505
13096
- task:
13097
  type: Classification
13098
- - dataset:
13099
- config: en
13100
  name: MTEB MTOPIntentClassification (en)
13101
- revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba
13102
- split: test
13103
  type: mteb/mtop_intent
 
 
 
13104
  metrics:
13105
  - type: accuracy
13106
  value: 56.846799999999995
@@ -13110,14 +13114,14 @@ model-index:
13110
  value: 58.797999999999995
13111
  - type: main_score
13112
  value: 56.846799999999995
13113
- task:
13114
  type: Classification
13115
- - dataset:
13116
- config: en
13117
  name: MTEB MassiveIntentClassification (en)
13118
- revision: 4672e20407010da34463acc759c162ca9734bca6
13119
- split: test
13120
  type: mteb/amazon_massive_intent
 
 
 
13121
  metrics:
13122
  - type: accuracy
13123
  value: 64.768
@@ -13127,14 +13131,14 @@ model-index:
13127
  value: 63.67
13128
  - type: main_score
13129
  value: 64.768
13130
- task:
13131
  type: Classification
13132
- - dataset:
13133
- config: en
13134
  name: MTEB MassiveScenarioClassification (en)
13135
- revision: fad2c6e8459f9e1c45d9315f4953d921437d70f8
13136
- split: test
13137
  type: mteb/amazon_massive_scenario
 
 
 
13138
  metrics:
13139
  - type: accuracy
13140
  value: 71.3416
@@ -13144,14 +13148,14 @@ model-index:
13144
  value: 71.19680000000001
13145
  - type: main_score
13146
  value: 71.3416
13147
- task:
13148
- type: Classification
13149
- - dataset:
13150
- config: default
13151
  name: MTEB MedrxivClusteringP2P (default)
13152
- revision: e7a26af6f3ae46b30dde8737f02c07b1505bcc73
13153
- split: test
13154
  type: mteb/medrxiv-clustering-p2p
 
 
 
13155
  metrics:
13156
  - type: v_measure
13157
  value: 32.5684
@@ -13159,14 +13163,14 @@ model-index:
13159
  value: 1.6362999999999999
13160
  - type: main_score
13161
  value: 32.5684
13162
- task:
13163
  type: Clustering
13164
- - dataset:
13165
- config: default
13166
  name: MTEB MedrxivClusteringS2S (default)
13167
- revision: 35191c8c0dca72d8ff3efcd72aa802307d469663
13168
- split: test
13169
  type: mteb/medrxiv-clustering-s2s
 
 
 
13170
  metrics:
13171
  - type: v_measure
13172
  value: 31.551299999999998
@@ -13174,14 +13178,14 @@ model-index:
13174
  value: 1.7208999999999999
13175
  - type: main_score
13176
  value: 31.551299999999998
13177
- task:
13178
- type: Clustering
13179
- - dataset:
13180
- config: default
13181
  name: MTEB MindSmallReranking (default)
13182
- revision: 59042f120c80e8afa9cdbb224f67076cec0fc9a7
13183
- split: test
13184
  type: mteb/mind_small
 
 
 
13185
  metrics:
13186
  - type: map
13187
  value: 30.883
@@ -13201,14 +13205,14 @@ model-index:
13201
  value: 13.2767
13202
  - type: main_score
13203
  value: 30.883
13204
- task:
13205
- type: Reranking
13206
- - dataset:
13207
- config: default
13208
  name: MTEB NFCorpus (default)
13209
- revision: ec0fa4fe99da2ff19ca1214b7966684033a58814
13210
- split: test
13211
  type: mteb/nfcorpus
 
 
 
13212
  metrics:
13213
  - type: ndcg_at_1
13214
  value: 41.486000000000004
@@ -13492,14 +13496,14 @@ model-index:
13492
  value: 33.1177
13493
  - type: main_score
13494
  value: 33.737
13495
- task:
13496
  type: Retrieval
13497
- - dataset:
13498
- config: default
13499
  name: MTEB NQ (default)
13500
- revision: b774495ed302d8c44a3a7ea25c90dbce03968f31
13501
- split: test
13502
  type: mteb/nq
 
 
 
13503
  metrics:
13504
  - type: ndcg_at_1
13505
  value: 32.793
@@ -13783,14 +13787,14 @@ model-index:
13783
  value: 27.4444
13784
  - type: main_score
13785
  value: 51.63100000000001
13786
- task:
13787
  type: Retrieval
13788
- - dataset:
13789
- config: default
13790
  name: MTEB QuoraRetrieval (default)
13791
- revision: e4e08e0b7dbe3c8700f0daef558ff32256715259
13792
- split: test
13793
  type: mteb/quora
 
 
 
13794
  metrics:
13795
  - type: ndcg_at_1
13796
  value: 79.36999999999999
@@ -14074,14 +14078,14 @@ model-index:
14074
  value: 75.69369999999999
14075
  - type: main_score
14076
  value: 86.696
14077
- task:
14078
- type: Retrieval
14079
- - dataset:
14080
- config: default
14081
  name: MTEB RedditClustering (default)
14082
- revision: 24640382cdbf8abc73003fb0fa6d111a705499eb
14083
- split: test
14084
  type: mteb/reddit-clustering
 
 
 
14085
  metrics:
14086
  - type: v_measure
14087
  value: 50.019999999999996
@@ -14089,14 +14093,14 @@ model-index:
14089
  value: 4.5914
14090
  - type: main_score
14091
  value: 50.019999999999996
14092
- task:
14093
  type: Clustering
14094
- - dataset:
14095
- config: default
14096
  name: MTEB RedditClusteringP2P (default)
14097
- revision: 385e3cb46b4cfa89021f56c4380204149d0efe33
14098
- split: test
14099
  type: mteb/reddit-clustering-p2p
 
 
 
14100
  metrics:
14101
  - type: v_measure
14102
  value: 53.9756
@@ -14104,14 +14108,14 @@ model-index:
14104
  value: 11.6573
14105
  - type: main_score
14106
  value: 53.9756
14107
- task:
14108
- type: Clustering
14109
- - dataset:
14110
- config: default
14111
  name: MTEB SCIDOCS (default)
14112
- revision: f8c2fcf00f625baaa80f62ec5bd9e1fff3b8ae88
14113
- split: test
14114
  type: mteb/scidocs
 
 
 
14115
  metrics:
14116
  - type: ndcg_at_1
14117
  value: 24.6
@@ -14395,14 +14399,14 @@ model-index:
14395
  value: 20.8635
14396
  - type: main_score
14397
  value: 22.542
14398
- task:
14399
- type: Retrieval
14400
- - dataset:
14401
- config: default
14402
  name: MTEB SICK-R (default)
14403
- revision: 20a6d6f312dd54037fe07a32d58e5e168867909d
14404
- split: test
14405
  type: mteb/sickr-sts
 
 
 
14406
  metrics:
14407
  - type: pearson
14408
  value: 77.4874
@@ -14422,14 +14426,14 @@ model-index:
14422
  value: 68.79809999999999
14423
  - type: main_score
14424
  value: 68.79809999999999
14425
- task:
14426
  type: STS
14427
- - dataset:
14428
- config: default
14429
  name: MTEB STS12 (default)
14430
- revision: a0d554a64d88156834ff5ae9920b964011b16384
14431
- split: test
14432
  type: mteb/sts12-sts
 
 
 
14433
  metrics:
14434
  - type: pearson
14435
  value: 67.8391
@@ -14449,14 +14453,14 @@ model-index:
14449
  value: 64.7722
14450
  - type: main_score
14451
  value: 64.77380000000001
14452
- task:
14453
  type: STS
14454
- - dataset:
14455
- config: default
14456
  name: MTEB STS13 (default)
14457
- revision: 7e90230a92c190f1bf69ae9002b8cea547a64cca
14458
- split: test
14459
  type: mteb/sts13-sts
 
 
 
14460
  metrics:
14461
  - type: pearson
14462
  value: 78.8177
@@ -14476,14 +14480,14 @@ model-index:
14476
  value: 79.3253
14477
  - type: main_score
14478
  value: 79.3253
14479
- task:
14480
  type: STS
14481
- - dataset:
14482
- config: default
14483
  name: MTEB STS14 (default)
14484
- revision: 6031580fec1f6af667f0bd2da0a551cf4f0b2375
14485
- split: test
14486
  type: mteb/sts14-sts
 
 
 
14487
  metrics:
14488
  - type: pearson
14489
  value: 75.6791
@@ -14503,14 +14507,14 @@ model-index:
14503
  value: 70.1701
14504
  - type: main_score
14505
  value: 70.1701
14506
- task:
14507
  type: STS
14508
- - dataset:
14509
- config: default
14510
  name: MTEB STS15 (default)
14511
- revision: ae752c7c21bf194d8b67fd573edf7ae58183cbe3
14512
- split: test
14513
  type: mteb/sts15-sts
 
 
 
14514
  metrics:
14515
  - type: pearson
14516
  value: 80.4413
@@ -14530,14 +14534,14 @@ model-index:
14530
  value: 82.0343
14531
  - type: main_score
14532
  value: 82.0343
14533
- task:
14534
  type: STS
14535
- - dataset:
14536
- config: default
14537
  name: MTEB STS16 (default)
14538
- revision: 4d8694f8f0e0100860b497b999b3dbed754a0513
14539
- split: test
14540
  type: mteb/sts16-sts
 
 
 
14541
  metrics:
14542
  - type: pearson
14543
  value: 77.172
@@ -14557,14 +14561,14 @@ model-index:
14557
  value: 78.9633
14558
  - type: main_score
14559
  value: 78.9633
14560
- task:
14561
  type: STS
14562
- - dataset:
14563
- config: en-en
14564
  name: MTEB STS17 (en-en)
14565
- revision: faeb762787bd10488a50c8b5be4a3b82e411949c
14566
- split: test
14567
  type: mteb/sts17-crosslingual-sts
 
 
 
14568
  metrics:
14569
  - type: pearson
14570
  value: 83.5117
@@ -14584,14 +14588,14 @@ model-index:
14584
  value: 84.64970000000001
14585
  - type: main_score
14586
  value: 84.64970000000001
14587
- task:
14588
  type: STS
14589
- - dataset:
14590
- config: es-en
14591
  name: MTEB STS17 (es-en)
14592
- revision: faeb762787bd10488a50c8b5be4a3b82e411949c
14593
- split: test
14594
  type: mteb/sts17-crosslingual-sts
 
 
 
14595
  metrics:
14596
  - type: pearson
14597
  value: 29.0052
@@ -14611,14 +14615,14 @@ model-index:
14611
  value: 30.640299999999996
14612
  - type: main_score
14613
  value: 30.640299999999996
14614
- task:
14615
  type: STS
14616
- - dataset:
14617
- config: nl-en
14618
  name: MTEB STS17 (nl-en)
14619
- revision: faeb762787bd10488a50c8b5be4a3b82e411949c
14620
- split: test
14621
  type: mteb/sts17-crosslingual-sts
 
 
 
14622
  metrics:
14623
  - type: pearson
14624
  value: 42.0755
@@ -14638,14 +14642,14 @@ model-index:
14638
  value: 39.7565
14639
  - type: main_score
14640
  value: 39.763999999999996
14641
- task:
14642
  type: STS
14643
- - dataset:
14644
- config: en-de
14645
  name: MTEB STS17 (en-de)
14646
- revision: faeb762787bd10488a50c8b5be4a3b82e411949c
14647
- split: test
14648
  type: mteb/sts17-crosslingual-sts
 
 
 
14649
  metrics:
14650
  - type: pearson
14651
  value: 44.2318
@@ -14665,14 +14669,14 @@ model-index:
14665
  value: 46.5518
14666
  - type: main_score
14667
  value: 46.5518
14668
- task:
14669
  type: STS
14670
- - dataset:
14671
- config: fr-en
14672
  name: MTEB STS17 (fr-en)
14673
- revision: faeb762787bd10488a50c8b5be4a3b82e411949c
14674
- split: test
14675
  type: mteb/sts17-crosslingual-sts
 
 
 
14676
  metrics:
14677
  - type: pearson
14678
  value: 36.716100000000004
@@ -14692,14 +14696,14 @@ model-index:
14692
  value: 34.6968
14693
  - type: main_score
14694
  value: 34.6968
14695
- task:
14696
  type: STS
14697
- - dataset:
14698
- config: en-ar
14699
  name: MTEB STS17 (en-ar)
14700
- revision: faeb762787bd10488a50c8b5be4a3b82e411949c
14701
- split: test
14702
  type: mteb/sts17-crosslingual-sts
 
 
 
14703
  metrics:
14704
  - type: pearson
14705
  value: 21.2825
@@ -14719,14 +14723,14 @@ model-index:
14719
  value: 17.6922
14720
  - type: main_score
14721
  value: 17.6922
14722
- task:
14723
  type: STS
14724
- - dataset:
14725
- config: it-en
14726
  name: MTEB STS17 (it-en)
14727
- revision: faeb762787bd10488a50c8b5be4a3b82e411949c
14728
- split: test
14729
  type: mteb/sts17-crosslingual-sts
 
 
 
14730
  metrics:
14731
  - type: pearson
14732
  value: 32.1584
@@ -14746,14 +14750,14 @@ model-index:
14746
  value: 27.9254
14747
  - type: main_score
14748
  value: 27.9254
14749
- task:
14750
  type: STS
14751
- - dataset:
14752
- config: en-tr
14753
  name: MTEB STS17 (en-tr)
14754
- revision: faeb762787bd10488a50c8b5be4a3b82e411949c
14755
- split: test
14756
  type: mteb/sts17-crosslingual-sts
 
 
 
14757
  metrics:
14758
  - type: pearson
14759
  value: 21.0842
@@ -14773,14 +14777,14 @@ model-index:
14773
  value: 18.5115
14774
  - type: main_score
14775
  value: 18.5115
14776
- task:
14777
  type: STS
14778
- - dataset:
14779
- config: en
14780
  name: MTEB STS22 (en)
14781
- revision: de9d86b3b84231dc21f76c7b7af1f28e2f57f6e3
14782
- split: test
14783
  type: mteb/sts22-crosslingual-sts
 
 
 
14784
  metrics:
14785
  - type: pearson
14786
  value: 66.9563
@@ -14800,14 +14804,14 @@ model-index:
14800
  value: 67.4747
14801
  - type: main_score
14802
  value: 67.4747
14803
- task:
14804
  type: STS
14805
- - dataset:
14806
- config: de-en
14807
  name: MTEB STS22 (de-en)
14808
- revision: de9d86b3b84231dc21f76c7b7af1f28e2f57f6e3
14809
- split: test
14810
  type: mteb/sts22-crosslingual-sts
 
 
 
14811
  metrics:
14812
  - type: pearson
14813
  value: 56.3095
@@ -14827,14 +14831,14 @@ model-index:
14827
  value: 54.1005
14828
  - type: main_score
14829
  value: 54.1005
14830
- task:
14831
  type: STS
14832
- - dataset:
14833
- config: es-en
14834
  name: MTEB STS22 (es-en)
14835
- revision: de9d86b3b84231dc21f76c7b7af1f28e2f57f6e3
14836
- split: test
14837
  type: mteb/sts22-crosslingual-sts
 
 
 
14838
  metrics:
14839
  - type: pearson
14840
  value: 62.0575
@@ -14854,14 +14858,14 @@ model-index:
14854
  value: 66.9527
14855
  - type: main_score
14856
  value: 66.9527
14857
- task:
14858
  type: STS
14859
- - dataset:
14860
- config: pl-en
14861
  name: MTEB STS22 (pl-en)
14862
- revision: de9d86b3b84231dc21f76c7b7af1f28e2f57f6e3
14863
- split: test
14864
  type: mteb/sts22-crosslingual-sts
 
 
 
14865
  metrics:
14866
  - type: pearson
14867
  value: 68.42439999999999
@@ -14881,14 +14885,14 @@ model-index:
14881
  value: 69.0444
14882
  - type: main_score
14883
  value: 69.0444
14884
- task:
14885
  type: STS
14886
- - dataset:
14887
- config: zh-en
14888
  name: MTEB STS22 (zh-en)
14889
- revision: de9d86b3b84231dc21f76c7b7af1f28e2f57f6e3
14890
- split: test
14891
  type: mteb/sts22-crosslingual-sts
 
 
 
14892
  metrics:
14893
  - type: pearson
14894
  value: 34.164699999999996
@@ -14908,14 +14912,14 @@ model-index:
14908
  value: 36.1776
14909
  - type: main_score
14910
  value: 36.1776
14911
- task:
14912
  type: STS
14913
- - dataset:
14914
- config: default
14915
  name: MTEB STSBenchmark (default)
14916
- revision: b0fddb56ed78048fa8b90373c8a3cfc37b684831
14917
- split: test
14918
  type: mteb/stsbenchmark-sts
 
 
 
14919
  metrics:
14920
  - type: pearson
14921
  value: 78.0802
@@ -14935,14 +14939,14 @@ model-index:
14935
  value: 78.0444
14936
  - type: main_score
14937
  value: 78.0444
14938
- task:
14939
- type: STS
14940
- - dataset:
14941
- config: default
14942
  name: MTEB SciDocsRR (default)
14943
- revision: d3c5e1fc0b855ab6097bf1cda04dd73947d7caab
14944
- split: test
14945
  type: mteb/scidocs-reranking
 
 
 
14946
  metrics:
14947
  - type: map
14948
  value: 86.4489
@@ -14962,14 +14966,14 @@ model-index:
14962
  value: 42.9429
14963
  - type: main_score
14964
  value: 86.4489
14965
- task:
14966
- type: Reranking
14967
- - dataset:
14968
- config: default
14969
  name: MTEB SciFact (default)
14970
- revision: 0228b52cf27578f30900b9e5271d331663a030d7
14971
- split: test
14972
  type: mteb/scifact
 
 
 
14973
  metrics:
14974
  - type: ndcg_at_1
14975
  value: 59.333000000000006
@@ -15253,14 +15257,14 @@ model-index:
15253
  value: 75.2553
15254
  - type: main_score
15255
  value: 71.27
15256
- task:
15257
- type: Retrieval
15258
- - dataset:
15259
- config: default
15260
  name: MTEB SprintDuplicateQuestions (default)
15261
- revision: d66bd1f72af766a5cc4b0ca5e00c162f89e8cc46
15262
- split: test
15263
  type: mteb/sprintduplicatequestions-pairclassification
 
 
 
15264
  metrics:
15265
  - type: similarity_accuracy
15266
  value: 99.7604
@@ -15344,14 +15348,14 @@ model-index:
15344
  value: 94.17
15345
  - type: main_score
15346
  value: 94.17
15347
- task:
15348
- type: PairClassification
15349
- - dataset:
15350
- config: default
15351
  name: MTEB StackExchangeClustering (default)
15352
- revision: 6cbc1f7b2bc0622f2e39d2c77fa502909748c259
15353
- split: test
15354
  type: mteb/stackexchange-clustering
 
 
 
15355
  metrics:
15356
  - type: v_measure
15357
  value: 64.6589
@@ -15359,14 +15363,14 @@ model-index:
15359
  value: 4.734
15360
  - type: main_score
15361
  value: 64.6589
15362
- task:
15363
  type: Clustering
15364
- - dataset:
15365
- config: default
15366
  name: MTEB StackExchangeClusteringP2P (default)
15367
- revision: 815ca46b2622cec33ccafc3735d572c266efdb44
15368
- split: test
15369
  type: mteb/stackexchange-clustering-p2p
 
 
 
15370
  metrics:
15371
  - type: v_measure
15372
  value: 32.9388
@@ -15374,14 +15378,14 @@ model-index:
15374
  value: 1.6312
15375
  - type: main_score
15376
  value: 32.9388
15377
- task:
15378
- type: Clustering
15379
- - dataset:
15380
- config: default
15381
  name: MTEB StackOverflowDupQuestions (default)
15382
- revision: e185fbe320c72810689fc5848eb6114e1ef5ec69
15383
- split: test
15384
  type: mteb/stackoverflowdupquestions-reranking
 
 
 
15385
  metrics:
15386
  - type: map
15387
  value: 52.645399999999995
@@ -15401,14 +15405,14 @@ model-index:
15401
  value: 39.409499999999994
15402
  - type: main_score
15403
  value: 52.645399999999995
15404
- task:
15405
- type: Reranking
15406
- - dataset:
15407
- config: default
15408
  name: MTEB StackOverflowQA (default)
15409
- revision: db8f169f3894c14a00251061f957b2063eef2bd5
15410
- split: test
15411
  type: CoIR-Retrieval/stackoverflow-qa
 
 
 
15412
  metrics:
15413
  - type: ndcg_at_1
15414
  value: 74.97500000000001
@@ -15692,14 +15696,14 @@ model-index:
15692
  value: 82.238
15693
  - type: main_score
15694
  value: 83.92699999999999
15695
- task:
15696
- type: Retrieval
15697
- - dataset:
15698
- config: default
15699
  name: MTEB SummEval (default)
15700
- revision: cda12ad7615edc362dbf25a00fdd61d3b1eaf93c
15701
- split: test
15702
  type: mteb/summeval
 
 
 
15703
  metrics:
15704
  - type: pearson
15705
  value: 29.8395
@@ -15715,14 +15719,14 @@ model-index:
15715
  value: 29.8395
15716
  - type: main_score
15717
  value: 29.383
15718
- task:
15719
- type: Summarization
15720
- - dataset:
15721
- config: default
15722
  name: MTEB SyntheticText2SQL (default)
15723
- revision: 686b87296c3a0191b5d9415a00526c62db9fce09
15724
- split: test
15725
  type: CoIR-Retrieval/synthetic-text2sql
 
 
 
15726
  metrics:
15727
  - type: ndcg_at_1
15728
  value: 4.222
@@ -16006,14 +16010,14 @@ model-index:
16006
  value: -52.9095
16007
  - type: main_score
16008
  value: 44.775
16009
- task:
16010
  type: Retrieval
16011
- - dataset:
16012
- config: default
16013
  name: MTEB TRECCOVID (default)
16014
- revision: bb9466bac8153a0349341eb1b22e06409e78ef4e
16015
- split: test
16016
  type: mteb/trec-covid
 
 
 
16017
  metrics:
16018
  - type: ndcg_at_1
16019
  value: 70.0
@@ -16297,14 +16301,14 @@ model-index:
16297
  value: -5.6598999999999995
16298
  - type: main_score
16299
  value: 63.098
16300
- task:
16301
  type: Retrieval
16302
- - dataset:
16303
- config: default
16304
  name: MTEB Touche2020 (default)
16305
- revision: a34f9a33db75fa0cbb21bb5cfc3dae8dc8bec93f
16306
- split: test
16307
  type: mteb/touche2020
 
 
 
16308
  metrics:
16309
  - type: ndcg_at_1
16310
  value: 23.469
@@ -16588,14 +16592,14 @@ model-index:
16588
  value: -0.7001000000000001
16589
  - type: main_score
16590
  value: 24.029
16591
- task:
16592
- type: Retrieval
16593
- - dataset:
16594
- config: default
16595
  name: MTEB ToxicConversationsClassification (default)
16596
- revision: edfaf9da55d3dd50d43143d90c1ac476895ae6de
16597
- split: test
16598
  type: mteb/toxic_conversations_50k
 
 
 
16599
  metrics:
16600
  - type: accuracy
16601
  value: 62.9395
@@ -16609,14 +16613,14 @@ model-index:
16609
  value: 10.306600000000001
16610
  - type: main_score
16611
  value: 62.9395
16612
- task:
16613
  type: Classification
16614
- - dataset:
16615
- config: default
16616
  name: MTEB TweetSentimentExtractionClassification (default)
16617
- revision: d604517c81ca91fe16a244d1248fc021f9ecee7a
16618
- split: test
16619
  type: mteb/tweet_sentiment_extraction
 
 
 
16620
  metrics:
16621
  - type: accuracy
16622
  value: 52.8721
@@ -16626,14 +16630,14 @@ model-index:
16626
  value: 52.4319
16627
  - type: main_score
16628
  value: 52.8721
16629
- task:
16630
- type: Classification
16631
- - dataset:
16632
- config: default
16633
  name: MTEB TwentyNewsgroupsClustering (default)
16634
- revision: 6125ec4e24fa026cec8a478383ee943acfbd5449
16635
- split: test
16636
  type: mteb/twentynewsgroups-clustering
 
 
 
16637
  metrics:
16638
  - type: v_measure
16639
  value: 44.9227
@@ -16641,14 +16645,14 @@ model-index:
16641
  value: 1.1638000000000002
16642
  - type: main_score
16643
  value: 44.9227
16644
- task:
16645
- type: Clustering
16646
- - dataset:
16647
- config: default
16648
  name: MTEB TwitterSemEval2015 (default)
16649
- revision: 70970daeab8776df92f5ea462b6173c0b46fd2d1
16650
- split: test
16651
  type: mteb/twittersemeval2015-pairclassification
 
 
 
16652
  metrics:
16653
  - type: similarity_accuracy
16654
  value: 82.04090000000001
@@ -16732,14 +16736,14 @@ model-index:
16732
  value: 60.0317
16733
  - type: main_score
16734
  value: 60.0317
16735
- task:
16736
  type: PairClassification
16737
- - dataset:
16738
- config: default
16739
  name: MTEB TwitterURLCorpus (default)
16740
- revision: 8b6510b0b1fa4e4c4f879467980e9be563ec1cdf
16741
- split: test
16742
  type: mteb/twitterurlcorpus-pairclassification
 
 
 
16743
  metrics:
16744
  - type: similarity_accuracy
16745
  value: 87.3035
@@ -16823,9 +16827,6 @@ model-index:
       value: 82.5792
     - type: main_score
       value: 82.5792
-    task:
-      type: PairClassification
-pipeline_tag: sentence-similarity
 ---
 # Granite-Embedding-30m-English

385
  value: 29.7236
386
  - type: main_score
387
  value: 6.203
388
+ - task:
389
  type: Retrieval
390
+ dataset:
 
391
  name: MTEB ArguAna (default)
 
 
392
  type: mteb/arguana
393
+ config: default
394
+ split: test
395
+ revision: c22ab2a51041ffd869aaddef7af8d8215647e41a
396
  metrics:
397
  - type: ndcg_at_1
398
  value: 31.791999999999998
 
676
  value: 11.3251
677
  - type: main_score
678
  value: 56.355999999999995
679
+ - task:
680
+ type: Clustering
681
+ dataset:
 
682
  name: MTEB ArxivClusteringP2P (default)
 
 
683
  type: mteb/arxiv-clustering-p2p
684
+ config: default
685
+ split: test
686
+ revision: a122ad7f3f0291bf49cc6f4d32aa80929df69d5d
687
  metrics:
688
  - type: v_measure
689
  value: 46.813
 
691
  value: 13.830899999999998
692
  - type: main_score
693
  value: 46.813
694
+ - task:
695
  type: Clustering
696
+ dataset:
 
697
  name: MTEB ArxivClusteringS2S (default)
 
 
698
  type: mteb/arxiv-clustering-s2s
699
+ config: default
700
+ split: test
701
+ revision: f910caf1a6075f7329cdf8c1a6135696f37dbd53
702
  metrics:
703
  - type: v_measure
704
  value: 41.9895
 
706
  value: 14.3004
707
  - type: main_score
708
  value: 41.9895
709
+ - task:
710
+ type: Reranking
711
+ dataset:
 
712
  name: MTEB AskUbuntuDupQuestions (default)
 
 
713
  type: mteb/askubuntudupquestions-reranking
714
+ config: default
715
+ split: test
716
+ revision: 2000358ca161889fa9c082cb41daa8dcfb161a54
717
  metrics:
718
  - type: map
719
  value: 64.1329
 
733
  value: 23.508699999999997
734
  - type: main_score
735
  value: 64.1329
736
+ - task:
737
+ type: STS
738
+ dataset:
 
739
  name: MTEB BIOSSES (default)
 
 
740
  type: mteb/biosses-sts
741
+ config: default
742
+ split: test
743
+ revision: d3fb88f8f02e40887cd149695127462bbcf29b4a
744
  metrics:
745
  - type: pearson
746
  value: 90.2058
 
760
  value: 88.1641
761
  - type: main_score
762
  value: 88.1641
763
+ - task:
764
+ type: Classification
765
+ dataset:
 
766
  name: MTEB Banking77Classification (default)
 
 
767
  type: mteb/banking77
768
+ config: default
769
+ split: test
770
+ revision: 0fd18e25b25c072e09e0d92ab615fda904d66300
771
  metrics:
772
  - type: accuracy
773
  value: 77.3247
 
777
  value: 76.3532
778
  - type: main_score
779
  value: 77.3247
780
+ - task:
781
+ type: Clustering
782
+ dataset:
 
783
  name: MTEB BiorxivClusteringP2P (default)
 
 
784
  type: mteb/biorxiv-clustering-p2p
785
+ config: default
786
+ split: test
787
+ revision: 65b79d1d13f80053f67aca9498d9402c2d9f1f40
788
  metrics:
789
  - type: v_measure
790
  value: 39.018
 
792
  value: 0.7512
793
  - type: main_score
794
  value: 39.018
795
+ - task:
796
  type: Clustering
797
+ dataset:
 
798
  name: MTEB BiorxivClusteringS2S (default)
 
 
799
  type: mteb/biorxiv-clustering-s2s
800
+ config: default
801
+ split: test
802
+ revision: 258694dd0231531bc1fd9de6ceb52a0853c6d908
803
  metrics:
804
  - type: v_measure
805
  value: 36.8097
 
807
  value: 0.9368
808
  - type: main_score
809
  value: 36.8097
810
+ - task:
811
+ type: Retrieval
812
+ dataset:
 
813
  name: MTEB COIRCodeSearchNetRetrieval (python)
 
 
814
  type: CoIR-Retrieval/CodeSearchNet
815
+ config: python
816
+ split: test
817
+ revision: 4adc7bc41202b5c13543c9c886a25f340634dab3
818
  metrics:
819
  - type: ndcg_at_1
820
  value: 85.353
 
1098
  value: 88.457
1099
  - type: main_score
1100
  value: 90.89699999999999
1101
+ - task:
1102
  type: Retrieval
1103
+ dataset:
 
1104
  name: MTEB COIRCodeSearchNetRetrieval (javascript)
 
 
1105
  type: CoIR-Retrieval/CodeSearchNet
1106
+ config: javascript
1107
+ split: test
1108
+ revision: 4adc7bc41202b5c13543c9c886a25f340634dab3
1109
  metrics:
1110
  - type: ndcg_at_1
1111
  value: 35.46
 
1389
  value: 58.6477
1390
  - type: main_score
1391
  value: 46.54
1392
+ - task:
1393
  type: Retrieval
1394
+ dataset:
 
1395
  name: MTEB COIRCodeSearchNetRetrieval (go)
 
 
1396
  type: CoIR-Retrieval/CodeSearchNet
1397
+ config: go
1398
+ split: test
1399
+ revision: 4adc7bc41202b5c13543c9c886a25f340634dab3
1400
  metrics:
1401
  - type: ndcg_at_1
1402
  value: 45.728
 
1680
  value: 58.7689
1681
  - type: main_score
1682
  value: 59.471
1683
+ - task:
1684
  type: Retrieval
1685
+ dataset:
 
1686
  name: MTEB COIRCodeSearchNetRetrieval (ruby)
 
 
1687
  type: CoIR-Retrieval/CodeSearchNet
1688
+ config: ruby
1689
+ split: test
1690
+ revision: 4adc7bc41202b5c13543c9c886a25f340634dab3
1691
  metrics:
1692
  - type: ndcg_at_1
1693
  value: 38.144
 
1971
  value: 52.5296
1972
  - type: main_score
1973
  value: 50.166
1974
+ - task:
1975
  type: Retrieval
1976
+ dataset:
 
1977
  name: MTEB COIRCodeSearchNetRetrieval (java)
 
 
1978
  type: CoIR-Retrieval/CodeSearchNet
1979
+ config: java
1980
+ split: test
1981
+ revision: 4adc7bc41202b5c13543c9c886a25f340634dab3
1982
  metrics:
1983
  - type: ndcg_at_1
1984
  value: 42.355
 
2262
  value: 57.312
2263
  - type: main_score
2264
  value: 55.062
2265
+ - task:
2266
  type: Retrieval
2267
+ dataset:
 
2268
  name: MTEB COIRCodeSearchNetRetrieval (php)
 
 
2269
  type: CoIR-Retrieval/CodeSearchNet
2270
+ config: php
2271
+ split: test
2272
+ revision: 4adc7bc41202b5c13543c9c886a25f340634dab3
2273
  metrics:
2274
  - type: ndcg_at_1
2275
  value: 36.835
 
2553
  value: 53.6044
2554
  - type: main_score
2555
  value: 49.784
2556
+ - task:
2557
  type: Retrieval
2558
+ dataset:
 
2559
  name: MTEB CQADupstackAndroidRetrieval (default)
 
 
2560
  type: mteb/cqadupstack-android
2561
+ config: default
2562
+ split: test
2563
+ revision: f46a197baaae43b4f621051089b82a364682dfeb
2564
  metrics:
2565
  - type: ndcg_at_1
2566
  value: 44.206
 
2844
  value: 48.1413
2845
  - type: main_score
2846
  value: 54.106
2847
+ - task:
2848
  type: Retrieval
2849
+ dataset:
 
2850
  name: MTEB CQADupstackEnglishRetrieval (default)
 
 
2851
  type: mteb/cqadupstack-english
2852
+ config: default
2853
+ split: test
2854
+ revision: ad9991cb51e31e31e430383c75ffb2885547b5f0
2855
  metrics:
2856
  - type: ndcg_at_1
2857
  value: 41.274
 
3135
  value: 47.7803
3136
  - type: main_score
3137
  value: 50.251000000000005
3138
+ - task:
3139
  type: Retrieval
3140
+ dataset:
 
3141
  name: MTEB CQADupstackGamingRetrieval (default)
 
 
3142
  type: mteb/cqadupstack-gaming
3143
+ config: default
3144
+ split: test
3145
+ revision: 4885aa143210c98657558c04aaf3dc47cfb54340
3146
  metrics:
3147
  - type: ndcg_at_1
3148
  value: 47.147
 
3426
  value: 49.977
3427
  - type: main_score
3428
  value: 59.318000000000005
3429
+ - task:
3430
  type: Retrieval
3431
+ dataset:
 
3432
  name: MTEB CQADupstackGisRetrieval (default)
 
 
3433
  type: mteb/cqadupstack-gis
3434
+ config: default
3435
+ split: test
3436
+ revision: 5003b3064772da1887988e05400cf3806fe491f2
3437
  metrics:
3438
  - type: ndcg_at_1
3439
  value: 30.734
 
3717
  value: 41.652899999999995
3718
  - type: main_score
3719
  value: 43.564
3720
+ - task:
3721
  type: Retrieval
3722
+ dataset:
 
3723
  name: MTEB CQADupstackMathematicaRetrieval (default)
 
 
3724
  type: mteb/cqadupstack-mathematica
3725
+ config: default
3726
+ split: test
3727
+ revision: 90fceea13679c63fe563ded68f3b6f06e50061de
3728
  metrics:
3729
  - type: ndcg_at_1
3730
  value: 22.886
 
4008
  value: 29.7639
4009
  - type: main_score
4010
  value: 32.749
4011
+ - task:
4012
  type: Retrieval
4013
+ dataset:
 
4014
  name: MTEB CQADupstackPhysicsRetrieval (default)
 
 
4015
  type: mteb/cqadupstack-physics
4016
+ config: default
4017
+ split: test
4018
+ revision: 79531abbd1fb92d06c6d6315a0cbbbf5bb247ea4
4019
  metrics:
4020
  - type: ndcg_at_1
4021
  value: 38.114
 
4299
  value: 52.337500000000006
4300
  - type: main_score
4301
  value: 48.339999999999996
4302
+ - task:
4303
  type: Retrieval
4304
+ dataset:
 
4305
  name: MTEB CQADupstackProgrammersRetrieval (default)
 
 
4306
  type: mteb/cqadupstack-programmers
4307
+ config: default
4308
+ split: test
4309
+ revision: 6184bc1440d2dbc7612be22b50686b8826d22b32
4310
  metrics:
4311
  - type: ndcg_at_1
4312
  value: 34.247
 
4590
  value: 44.7656
4591
  - type: main_score
4592
  value: 44.065
4593
+ - task:
4594
  type: Retrieval
4595
+ dataset:
 
4596
  name: MTEB CQADupstackRetrieval (default)
 
 
4597
  type: CQADupstackRetrieval_is_a_combined_dataset
4598
+ config: default
4599
+ split: test
4600
+ revision: 160c094312a0e1facb97e55eeddb698c0abe3571
4601
  metrics:
4602
  - type: ndcg_at_1
4603
  value: 33.917750000000005
 
4881
  value: 45.21384166666667
4882
  - type: main_score
4883
  value: 44.29191666666667
4884
+ - task:
4885
  type: Retrieval
4886
+ dataset:
 
4887
  name: MTEB CQADupstackRetrieval (default)
 
 
4888
  type: CQADupstackRetrieval_is_a_combined_dataset
4889
+ config: default
4890
+ split: test
4891
+ revision: CQADupstackRetrieval_is_a_combined_dataset
4892
  metrics:
4893
  - type: main_score
4894
  value: 44.29191666666667
4895
  - type: ndcg_at_10
4896
  value: 44.29191666666667
4897
+ - task:
4898
  type: Retrieval
4899
+ dataset:
 
4900
  name: MTEB CQADupstackStatsRetrieval (default)
 
 
4901
  type: mteb/cqadupstack-stats
4902
+ config: default
4903
+ split: test
4904
+ revision: 65ac3a16b8e91f9cee4c9828cc7c335575432a2a
4905
  metrics:
4906
  - type: ndcg_at_1
4907
  value: 29.141000000000002
 
5185
  value: 51.8107
5186
  - type: main_score
5187
  value: 38.596000000000004
5188
+ - task:
5189
  type: Retrieval
5190
+ dataset:
 
5191
  name: MTEB CQADupstackTexRetrieval (default)
 
 
5192
  type: mteb/cqadupstack-tex
5193
+ config: default
5194
+ split: test
5195
+ revision: 46989137a86843e03a6195de44b09deda022eec7
5196
  metrics:
5197
  - type: ndcg_at_1
5198
  value: 24.054000000000002
 
5476
  value: 38.0747
5477
  - type: main_score
5478
  value: 33.722
5479
+ - task:
5480
  type: Retrieval
5481
+ dataset:
 
5482
  name: MTEB CQADupstackUnixRetrieval (default)
 
 
5483
  type: mteb/cqadupstack-unix
5484
+ config: default
5485
+ split: test
5486
+ revision: 6c6430d3a6d36f8d2a829195bc5dc94d7e063e53
5487
  metrics:
5488
  - type: ndcg_at_1
5489
  value: 35.168
 
5767
  value: 47.6603
5768
  - type: main_score
5769
  value: 46.071
5770
+ - task:
5771
  type: Retrieval
5772
+ dataset:
 
5773
  name: MTEB CQADupstackWebmastersRetrieval (default)
 
 
5774
  type: mteb/cqadupstack-webmasters
5775
+ config: default
5776
+ split: test
5777
+ revision: 160c094312a0e1facb97e55eeddb698c0abe3571
5778
  metrics:
5779
  - type: ndcg_at_1
5780
  value: 33.794000000000004
 
6058
  value: 47.1571
6059
  - type: main_score
6060
  value: 43.832
6061
+ - task:
6062
  type: Retrieval
6063
+ dataset:
 
6064
  name: MTEB CQADupstackWordpressRetrieval (default)
 
 
6065
  type: mteb/cqadupstack-wordpress
6066
+ config: default
6067
+ split: test
6068
+ revision: 4ffe81d471b1924886b33c7567bfb200e9eec5c4
6069
  metrics:
6070
  - type: ndcg_at_1
6071
  value: 26.247999999999998
 
6349
  value: 43.4448
6350
  - type: main_score
6351
  value: 36.889
6352
+ - task:
6353
  type: Retrieval
6354
+ dataset:
 
6355
  name: MTEB ClimateFEVER (default)
 
 
6356
  type: mteb/climate-fever
6357
+ config: default
6358
+ split: test
6359
+ revision: 47f2ac6acb640fc46020b02a5b59fdda04d39380
6360
  metrics:
6361
  - type: ndcg_at_1
6362
  value: 29.837000000000003
 
6640
  value: 19.817
6641
  - type: main_score
6642
  value: 30.263
6643
+ - task:
6644
  type: Retrieval
6645
+ dataset:
 
6646
  name: MTEB CodeFeedbackMT (default)
 
 
6647
  type: CoIR-Retrieval/codefeedback-mt
6648
+ config: default
6649
+ split: test
6650
+ revision: b0f12fa0c0dd67f59c95a5c33d02aeeb4c398c5f
6651
  metrics:
6652
  - type: ndcg_at_1
6653
  value: 27.002
 
6931
  value: 46.646300000000004
6932
  - type: main_score
6933
  value: 37.757000000000005
6934
+ - task:
6935
  type: Retrieval
6936
+ dataset:
 
6937
  name: MTEB CodeFeedbackST (default)
 
 
6938
  type: CoIR-Retrieval/codefeedback-st
6939
+ config: default
6940
+ split: test
6941
+ revision: d213819e87aab9010628da8b73ab4eb337c89340
6942
  metrics:
6943
  - type: ndcg_at_1
6944
  value: 53.335
 
7222
  value: 66.3142
7223
  - type: main_score
7224
  value: 69.425
7225
+ - task:
7226
  type: Retrieval
7227
+ dataset:
 
7228
  name: MTEB CodeSearchNetCCRetrieval (python)
 
 
7229
  type: CoIR-Retrieval/CodeSearchNet-ccr
7230
+ config: python
7231
+ split: test
7232
+ revision: 6e1effa2c03723c5fde48ee912b5ee08d4f211e8
7233
  metrics:
7234
  - type: ndcg_at_1
7235
  value: 39.395
 
7513
  value: 52.254900000000006
7514
  - type: main_score
7515
  value: 53.593999999999994
7516
+ - task:
7517
  type: Retrieval
7518
+ dataset:
 
7519
  name: MTEB CodeSearchNetCCRetrieval (javascript)
 
 
7520
  type: CoIR-Retrieval/CodeSearchNet-ccr
7521
+ config: javascript
7522
+ split: test
7523
+ revision: 6e1effa2c03723c5fde48ee912b5ee08d4f211e8
7524
  metrics:
7525
  - type: ndcg_at_1
7526
  value: 39.593
 
7804
  value: 52.0697
7805
  - type: main_score
7806
  value: 53.1
7807
+ - task:
7808
  type: Retrieval
7809
+ dataset:
 
7810
  name: MTEB CodeSearchNetCCRetrieval (go)
 
 
7811
  type: CoIR-Retrieval/CodeSearchNet-ccr
7812
+ config: go
7813
+ split: test
7814
+ revision: 6e1effa2c03723c5fde48ee912b5ee08d4f211e8
7815
  metrics:
7816
  - type: ndcg_at_1
7817
  value: 30.459999999999997
 
8095
  value: 46.942499999999995
8096
  - type: main_score
8097
  value: 42.094
8098
+ - task:
8099
  type: Retrieval
8100
+ dataset:
 
8101
  name: MTEB CodeSearchNetCCRetrieval (ruby)
 
 
8102
  type: CoIR-Retrieval/CodeSearchNet-ccr
8103
+ config: ruby
8104
+ split: test
8105
+ revision: 6e1effa2c03723c5fde48ee912b5ee08d4f211e8
8106
  metrics:
8107
  - type: ndcg_at_1
8108
  value: 37.827
 
8386
  value: 56.0608
8387
  - type: main_score
8388
  value: 51.686
8389
+ - task:
8390
  type: Retrieval
8391
+ dataset:
 
8392
  name: MTEB CodeSearchNetCCRetrieval (java)
 
 
8393
  type: CoIR-Retrieval/CodeSearchNet-ccr
8394
+ config: java
8395
+ split: test
8396
+ revision: 6e1effa2c03723c5fde48ee912b5ee08d4f211e8
8397
  metrics:
8398
  - type: ndcg_at_1
8399
  value: 39.744
 
8677
  value: 53.286100000000005
8678
  - type: main_score
8679
  value: 52.544000000000004
8680
+ - task:
8681
  type: Retrieval
8682
+ dataset:
 
8683
  name: MTEB CodeSearchNetCCRetrieval (php)
 
 
8684
  type: CoIR-Retrieval/CodeSearchNet-ccr
8685
+ config: php
8686
+ split: test
8687
+ revision: 6e1effa2c03723c5fde48ee912b5ee08d4f211e8
8688
  metrics:
8689
  - type: ndcg_at_1
8690
  value: 29.685
 
8968
  value: 45.535199999999996
8969
  - type: main_score
8970
  value: 41.814
8971
+ - task:
8972
  type: Retrieval
8973
+ dataset:
 
8974
  name: MTEB CodeSearchNetRetrieval (python)
 
 
8975
  type: code-search-net/code_search_net
8976
+ config: python
8977
+ split: test
8978
+ revision: fdc6a9e39575768c27eb8a2a5f702bf846eb4759
8979
  metrics:
8980
  - type: ndcg_at_1
8981
  value: 73.5
 
9259
  value: 71.0712
9260
  - type: main_score
9261
  value: 84.357
9262
+ - task:
9263
  type: Retrieval
9264
+ dataset:
 
9265
  name: MTEB CodeSearchNetRetrieval (javascript)
 
 
9266
  type: code-search-net/code_search_net
9267
+ config: javascript
9268
+ split: test
9269
+ revision: fdc6a9e39575768c27eb8a2a5f702bf846eb4759
9270
  metrics:
9271
  - type: ndcg_at_1
9272
  value: 59.4
 
9550
  value: 70.6705
9551
  - type: main_score
9552
  value: 71.384
9553
+ - task:
9554
  type: Retrieval
9555
+ dataset:
 
9556
  name: MTEB CodeSearchNetRetrieval (go)
 
 
9557
  type: code-search-net/code_search_net
9558
+ config: go
9559
+ split: test
9560
+ revision: fdc6a9e39575768c27eb8a2a5f702bf846eb4759
9561
  metrics:
9562
  - type: ndcg_at_1
9563
  value: 71.39999999999999
 
9841
  value: 72.7192
9842
  - type: main_score
9843
  value: 84.922
9844
+ - task:
9845
  type: Retrieval
9846
+ dataset:
 
9847
  name: MTEB CodeSearchNetRetrieval (ruby)
 
 
9848
  type: code-search-net/code_search_net
9849
+ config: ruby
9850
+ split: test
9851
+ revision: fdc6a9e39575768c27eb8a2a5f702bf846eb4759
9852
  metrics:
9853
  - type: ndcg_at_1
9854
  value: 61.9
 
10132
  value: 69.50460000000001
10133
  - type: main_score
10134
  value: 75.274
10135
+ - task:
10136
  type: Retrieval
10137
+ dataset:
 
10138
  name: MTEB CodeSearchNetRetrieval (java)
 
 
10139
  type: code-search-net/code_search_net
10140
+ config: java
10141
+ split: test
10142
+ revision: fdc6a9e39575768c27eb8a2a5f702bf846eb4759
10143
  metrics:
10144
  - type: ndcg_at_1
10145
  value: 52.6
 
10423
  value: 58.8048
10424
  - type: main_score
10425
  value: 69.447
10426
+ - task:
10427
  type: Retrieval
10428
+ dataset:
 
10429
  name: MTEB CodeSearchNetRetrieval (php)
 
 
10430
  type: code-search-net/code_search_net
10431
+ config: php
10432
+ split: test
10433
+ revision: fdc6a9e39575768c27eb8a2a5f702bf846eb4759
10434
  metrics:
10435
  - type: ndcg_at_1
10436
  value: 57.699999999999996
 
10714
  value: 63.361000000000004
10715
  - type: main_score
10716
  value: 73.455
10717
+ - task:
10718
  type: Retrieval
10719
+ dataset:
 
10720
  name: MTEB CodeTransOceanContest (default)
 
 
10721
  type: CoIR-Retrieval/codetrans-contest
10722
+ config: default
10723
+ split: test
10724
+ revision: 20da4eb20a4b17300c0986ee148c90867a7f2a4d
10725
  metrics:
10726
  - type: ndcg_at_1
10727
  value: 46.154
 
11005
  value: 77.0939
11006
  - type: main_score
11007
  value: 57.475
11008
+ - task:
11009
  type: Retrieval
11010
+ dataset:
 
11011
  name: MTEB CodeTransOceanDL (default)
 
 
11012
  type: CoIR-Retrieval/codetrans-dl
11013
+ config: default
11014
+ split: test
11015
+ revision: 281562cb8a1265ab5c0824bfa6ddcd9b0a15618f
11016
  metrics:
11017
  - type: ndcg_at_1
11018
  value: 8.889
 
11296
  value: 18.496499999999997
11297
  - type: main_score
11298
  value: 26.888
11299
+ - task:
11300
  type: Retrieval
11301
+ dataset:
 
11302
  name: MTEB CosQA (default)
 
 
11303
  type: CoIR-Retrieval/cosqa
11304
+ config: default
11305
+ split: test
11306
+ revision: bc5efb7e9d437246ce393ed19d772e08e4a79535
11307
  metrics:
11308
  - type: ndcg_at_1
11309
  value: 15.4
 
11587
  value: 29.583
11588
  - type: main_score
11589
  value: 35.449999999999996
11590
+ - task:
11591
  type: Retrieval
11592
+ dataset:
 
11593
  name: MTEB DBPedia (default)
 
 
11594
  type: mteb/dbpedia
11595
+ config: default
11596
+ split: test
11597
+ revision: c0f706b76e590d620bd6618b3ca8efdd34e2d659
11598
  metrics:
11599
  - type: ndcg_at_1
11600
  value: 51.37500000000001
 
11878
  value: 42.4844
11879
  - type: main_score
11880
  value: 35.96
11881
+ - task:
11882
+ type: Classification
11883
+ dataset:
 
11884
  name: MTEB EmotionClassification (default)
 
 
11885
  type: mteb/emotion
11886
+ config: default
11887
+ split: test
11888
+ revision: 4f58c6b202a23cf9a4da393831edf4f9183cad37
11889
  metrics:
11890
  - type: accuracy
11891
  value: 38.795
 
11895
  value: 40.7945
11896
  - type: main_score
11897
  value: 38.795
11898
+ - task:
11899
+ type: Retrieval
11900
+ dataset:
 
11901
  name: MTEB FEVER (default)
 
 
11902
  type: mteb/fever
11903
+ config: default
11904
+ split: test
11905
+ revision: bea83ef9e8fb933d90a2f1d5515737465d613e12
11906
  metrics:
11907
  - type: ndcg_at_1
11908
  value: 79.08800000000001
 
12186
  value: 62.96469999999999
12187
  - type: main_score
12188
  value: 85.528
12189
+ - task:
12190
  type: Retrieval
12191
+ dataset:
 
12192
  name: MTEB FiQA2018 (default)
 
 
12193
  type: mteb/fiqa
12194
+ config: default
12195
+ split: test
12196
+ revision: 27a168819829fe9bcd655c2df245fb19452e8e06
12197
  metrics:
12198
  - type: ndcg_at_1
12199
  value: 35.494
 
12477
  value: 43.4086
12478
  - type: main_score
12479
  value: 36.851
12480
+ - task:
12481
  type: Retrieval
12482
+ dataset:
 
12483
  name: MTEB HotpotQA (default)
 
 
12484
  type: mteb/hotpotqa
12485
+ config: default
12486
+ split: test
12487
+ revision: ab518f4d6fcca38d87c25209f94beba119d02014
12488
  metrics:
12489
  - type: ndcg_at_1
12490
  value: 73.531
 
12768
  value: 66.328
12769
  - type: main_score
12770
  value: 62.918
12771
+ - task:
12772
+ type: Classification
12773
+ dataset:
 
12774
  name: MTEB ImdbClassification (default)
 
 
12775
  type: mteb/imdb
12776
+ config: default
12777
+ split: test
12778
+ revision: 3d86128a09e091d6018b6d26cad27f2739fc2db7
12779
  metrics:
12780
  - type: accuracy
12781
  value: 62.2348
 
12789
  value: 57.750800000000005
12790
  - type: main_score
12791
  value: 62.2348
12792
+ - task:
12793
+ type: Retrieval
12794
+ dataset:
 
12795
  name: MTEB MSMARCO (default)
 
 
12796
  type: mteb/msmarco
12797
+ config: default
12798
+ split: dev
12799
+ revision: c5a29a104738b98a9e76336939199e264163d4a0
12800
  metrics:
12801
  - type: ndcg_at_1
12802
  value: 15.085999999999999
 
13080
  value: 26.801000000000002
13081
  - type: main_score
13082
  value: 30.711
13083
+ - task:
13084
+ type: Classification
13085
+ dataset:
 
13086
  name: MTEB MTOPDomainClassification (en)
 
 
13087
  type: mteb/mtop_domain
13088
+ config: en
13089
+ split: test
13090
+ revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf
13091
  metrics:
13092
  - type: accuracy
13093
  value: 89.4505
 
13097
  value: 89.442
13098
  - type: main_score
13099
  value: 89.4505
13100
+ - task:
13101
  type: Classification
13102
+ dataset:
 
13103
  name: MTEB MTOPIntentClassification (en)
 
 
13104
  type: mteb/mtop_intent
13105
+ config: en
13106
+ split: test
13107
+ revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba
13108
  metrics:
13109
  - type: accuracy
13110
  value: 56.846799999999995
 
13114
  value: 58.797999999999995
13115
  - type: main_score
13116
  value: 56.846799999999995
13117
+ - task:
13118
  type: Classification
13119
+ dataset:
 
13120
  name: MTEB MassiveIntentClassification (en)
 
 
13121
  type: mteb/amazon_massive_intent
13122
+ config: en
13123
+ split: test
13124
+ revision: 4672e20407010da34463acc759c162ca9734bca6
13125
  metrics:
13126
  - type: accuracy
13127
  value: 64.768
 
13131
  value: 63.67
13132
  - type: main_score
13133
  value: 64.768
13134
+ - task:
13135
  type: Classification
13136
+ dataset:
 
13137
  name: MTEB MassiveScenarioClassification (en)
 
 
13138
  type: mteb/amazon_massive_scenario
13139
+ config: en
13140
+ split: test
13141
+ revision: fad2c6e8459f9e1c45d9315f4953d921437d70f8
13142
  metrics:
13143
  - type: accuracy
13144
  value: 71.3416
 
13148
  value: 71.19680000000001
13149
  - type: main_score
13150
  value: 71.3416
13151
+ - task:
13152
+ type: Clustering
13153
+ dataset:
 
13154
  name: MTEB MedrxivClusteringP2P (default)
 
 
13155
  type: mteb/medrxiv-clustering-p2p
13156
+ config: default
13157
+ split: test
13158
+ revision: e7a26af6f3ae46b30dde8737f02c07b1505bcc73
13159
  metrics:
13160
  - type: v_measure
13161
  value: 32.5684
 
13163
  value: 1.6362999999999999
13164
  - type: main_score
13165
  value: 32.5684
13166
+ - task:
13167
  type: Clustering
13168
+ dataset:
 
13169
  name: MTEB MedrxivClusteringS2S (default)
 
 
13170
  type: mteb/medrxiv-clustering-s2s
13171
+ config: default
13172
+ split: test
13173
+ revision: 35191c8c0dca72d8ff3efcd72aa802307d469663
13174
  metrics:
13175
  - type: v_measure
13176
  value: 31.551299999999998
 
13178
  value: 1.7208999999999999
13179
  - type: main_score
13180
  value: 31.551299999999998
13181
+ - task:
13182
+ type: Reranking
13183
+ dataset:
 
13184
  name: MTEB MindSmallReranking (default)
 
 
13185
  type: mteb/mind_small
13186
+ config: default
13187
+ split: test
13188
+ revision: 59042f120c80e8afa9cdbb224f67076cec0fc9a7
13189
  metrics:
13190
  - type: map
13191
  value: 30.883
 
13205
  value: 13.2767
13206
  - type: main_score
13207
  value: 30.883
13208
+ - task:
13209
+ type: Retrieval
13210
+ dataset:
 
13211
  name: MTEB NFCorpus (default)
 
 
13212
  type: mteb/nfcorpus
13213
+ config: default
13214
+ split: test
13215
+ revision: ec0fa4fe99da2ff19ca1214b7966684033a58814
13216
  metrics:
13217
  - type: ndcg_at_1
13218
  value: 41.486000000000004
 
13496
  value: 33.1177
13497
  - type: main_score
13498
  value: 33.737
13499
+ - task:
13500
  type: Retrieval
13501
+ dataset:
 
13502
  name: MTEB NQ (default)
 
 
13503
  type: mteb/nq
13504
+ config: default
13505
+ split: test
13506
+ revision: b774495ed302d8c44a3a7ea25c90dbce03968f31
13507
  metrics:
13508
  - type: ndcg_at_1
13509
  value: 32.793
 
13787
  value: 27.4444
13788
  - type: main_score
13789
  value: 51.63100000000001
13790
+ - task:
13791
  type: Retrieval
13792
+ dataset:
 
13793
  name: MTEB QuoraRetrieval (default)
 
 
13794
  type: mteb/quora
13795
+ config: default
13796
+ split: test
13797
+ revision: e4e08e0b7dbe3c8700f0daef558ff32256715259
13798
  metrics:
13799
  - type: ndcg_at_1
13800
  value: 79.36999999999999
 
14078
  value: 75.69369999999999
14079
  - type: main_score
14080
  value: 86.696
14081
+ - task:
14082
+ type: Clustering
14083
+ dataset:
 
14084
  name: MTEB RedditClustering (default)
 
 
14085
  type: mteb/reddit-clustering
14086
+ config: default
14087
+ split: test
14088
+ revision: 24640382cdbf8abc73003fb0fa6d111a705499eb
14089
  metrics:
14090
  - type: v_measure
14091
  value: 50.019999999999996
 
14093
  value: 4.5914
14094
  - type: main_score
14095
  value: 50.019999999999996
14096
+ - task:
14097
  type: Clustering
14098
+ dataset:
 
14099
  name: MTEB RedditClusteringP2P (default)
 
 
14100
  type: mteb/reddit-clustering-p2p
14101
+ config: default
14102
+ split: test
14103
+ revision: 385e3cb46b4cfa89021f56c4380204149d0efe33
14104
  metrics:
14105
  - type: v_measure
14106
  value: 53.9756
 
14108
  value: 11.6573
14109
  - type: main_score
14110
  value: 53.9756
14111
+ - task:
14112
+ type: Retrieval
14113
+ dataset:
 
14114
  name: MTEB SCIDOCS (default)
 
 
14115
  type: mteb/scidocs
14116
+ config: default
14117
+ split: test
14118
+ revision: f8c2fcf00f625baaa80f62ec5bd9e1fff3b8ae88
14119
  metrics:
14120
  - type: ndcg_at_1
14121
  value: 24.6
 
14399
  value: 20.8635
14400
  - type: main_score
14401
  value: 22.542
14402
+ - task:
14403
+ type: STS
14404
+ dataset:
 
14405
  name: MTEB SICK-R (default)
 
 
14406
  type: mteb/sickr-sts
14407
+ config: default
14408
+ split: test
14409
+ revision: 20a6d6f312dd54037fe07a32d58e5e168867909d
14410
  metrics:
14411
  - type: pearson
14412
  value: 77.4874
 
14426
  value: 68.79809999999999
14427
  - type: main_score
14428
  value: 68.79809999999999
14429
+ - task:
14430
  type: STS
14431
+ dataset:
 
14432
  name: MTEB STS12 (default)
 
 
14433
  type: mteb/sts12-sts
14434
+ config: default
14435
+ split: test
14436
+ revision: a0d554a64d88156834ff5ae9920b964011b16384
14437
  metrics:
14438
  - type: pearson
14439
  value: 67.8391
 
14453
  value: 64.7722
14454
  - type: main_score
14455
  value: 64.77380000000001
14456
+ - task:
14457
  type: STS
14458
+ dataset:
 
14459
  name: MTEB STS13 (default)
 
 
14460
  type: mteb/sts13-sts
14461
+ config: default
14462
+ split: test
14463
+ revision: 7e90230a92c190f1bf69ae9002b8cea547a64cca
14464
  metrics:
14465
  - type: pearson
14466
  value: 78.8177
 
14480
  value: 79.3253
14481
  - type: main_score
14482
  value: 79.3253
14483
+ - task:
14484
  type: STS
14485
+ dataset:
 
14486
  name: MTEB STS14 (default)
 
 
14487
  type: mteb/sts14-sts
14488
+ config: default
14489
+ split: test
14490
+ revision: 6031580fec1f6af667f0bd2da0a551cf4f0b2375
14491
  metrics:
14492
  - type: pearson
14493
  value: 75.6791
 
14507
  value: 70.1701
14508
  - type: main_score
14509
  value: 70.1701
14510
+ - task:
14511
  type: STS
14512
+ dataset:
 
14513
  name: MTEB STS15 (default)
 
 
14514
  type: mteb/sts15-sts
14515
+ config: default
14516
+ split: test
14517
+ revision: ae752c7c21bf194d8b67fd573edf7ae58183cbe3
14518
  metrics:
14519
  - type: pearson
14520
  value: 80.4413
 
14534
  value: 82.0343
14535
  - type: main_score
14536
  value: 82.0343
14537
+ - task:
14538
  type: STS
14539
+ dataset:
 
14540
  name: MTEB STS16 (default)
 
 
14541
  type: mteb/sts16-sts
14542
+ config: default
14543
+ split: test
14544
+ revision: 4d8694f8f0e0100860b497b999b3dbed754a0513
14545
  metrics:
14546
  - type: pearson
14547
  value: 77.172
 
14561
  value: 78.9633
14562
  - type: main_score
14563
  value: 78.9633
14564
+ - task:
14565
  type: STS
14566
+ dataset:
 
14567
  name: MTEB STS17 (en-en)
 
 
14568
  type: mteb/sts17-crosslingual-sts
14569
+ config: en-en
14570
+ split: test
14571
+ revision: faeb762787bd10488a50c8b5be4a3b82e411949c
14572
  metrics:
14573
  - type: pearson
14574
  value: 83.5117
 
14588
  value: 84.64970000000001
14589
  - type: main_score
14590
  value: 84.64970000000001
14591
+ - task:
14592
  type: STS
14593
+ dataset:
 
14594
  name: MTEB STS17 (es-en)
 
 
14595
  type: mteb/sts17-crosslingual-sts
14596
+ config: es-en
14597
+ split: test
14598
+ revision: faeb762787bd10488a50c8b5be4a3b82e411949c
14599
  metrics:
14600
  - type: pearson
14601
  value: 29.0052
 
14615
  value: 30.640299999999996
14616
  - type: main_score
14617
  value: 30.640299999999996
14618
+ - task:
14619
  type: STS
14620
+ dataset:
 
14621
  name: MTEB STS17 (nl-en)
 
 
14622
  type: mteb/sts17-crosslingual-sts
14623
+ config: nl-en
14624
+ split: test
14625
+ revision: faeb762787bd10488a50c8b5be4a3b82e411949c
14626
  metrics:
14627
  - type: pearson
14628
  value: 42.0755
 
14642
  value: 39.7565
14643
  - type: main_score
14644
  value: 39.763999999999996
14645
+ - task:
14646
  type: STS
14647
+ dataset:
 
14648
  name: MTEB STS17 (en-de)
 
 
14649
  type: mteb/sts17-crosslingual-sts
14650
+ config: en-de
14651
+ split: test
14652
+ revision: faeb762787bd10488a50c8b5be4a3b82e411949c
14653
  metrics:
14654
  - type: pearson
14655
  value: 44.2318
 
14669
  value: 46.5518
14670
  - type: main_score
14671
  value: 46.5518
14672
+ - task:
14673
  type: STS
14674
+ dataset:
 
14675
  name: MTEB STS17 (fr-en)
 
 
14676
  type: mteb/sts17-crosslingual-sts
14677
+ config: fr-en
14678
+ split: test
14679
+ revision: faeb762787bd10488a50c8b5be4a3b82e411949c
14680
  metrics:
14681
  - type: pearson
14682
  value: 36.716100000000004
 
14696
  value: 34.6968
14697
  - type: main_score
14698
  value: 34.6968
14699
+ - task:
14700
  type: STS
14701
+ dataset:
 
14702
  name: MTEB STS17 (en-ar)
 
 
14703
  type: mteb/sts17-crosslingual-sts
14704
+ config: en-ar
14705
+ split: test
14706
+ revision: faeb762787bd10488a50c8b5be4a3b82e411949c
14707
  metrics:
14708
  - type: pearson
14709
  value: 21.2825
 
14723
  value: 17.6922
14724
  - type: main_score
14725
  value: 17.6922
14726
+ - task:
14727
  type: STS
14728
+ dataset:
 
14729
  name: MTEB STS17 (it-en)
 
 
14730
  type: mteb/sts17-crosslingual-sts
14731
+ config: it-en
14732
+ split: test
14733
+ revision: faeb762787bd10488a50c8b5be4a3b82e411949c
14734
  metrics:
14735
  - type: pearson
14736
  value: 32.1584
 
14750
  value: 27.9254
14751
  - type: main_score
14752
  value: 27.9254
14753
+ - task:
14754
  type: STS
14755
+ dataset:
 
14756
  name: MTEB STS17 (en-tr)
 
 
14757
  type: mteb/sts17-crosslingual-sts
14758
+ config: en-tr
14759
+ split: test
14760
+ revision: faeb762787bd10488a50c8b5be4a3b82e411949c
14761
  metrics:
14762
  - type: pearson
14763
  value: 21.0842
 
14777
  value: 18.5115
14778
  - type: main_score
14779
  value: 18.5115
14780
+ - task:
14781
  type: STS
14782
+ dataset:
 
14783
  name: MTEB STS22 (en)
 
 
14784
  type: mteb/sts22-crosslingual-sts
14785
+ config: en
14786
+ split: test
14787
+ revision: de9d86b3b84231dc21f76c7b7af1f28e2f57f6e3
14788
  metrics:
14789
  - type: pearson
14790
  value: 66.9563
 
14804
  value: 67.4747
14805
  - type: main_score
14806
  value: 67.4747
14807
+ - task:
14808
  type: STS
14809
+ dataset:
 
14810
  name: MTEB STS22 (de-en)
 
 
14811
  type: mteb/sts22-crosslingual-sts
14812
+ config: de-en
14813
+ split: test
14814
+ revision: de9d86b3b84231dc21f76c7b7af1f28e2f57f6e3
14815
  metrics:
14816
  - type: pearson
14817
  value: 56.3095
 
14831
  value: 54.1005
14832
  - type: main_score
14833
  value: 54.1005
14834
+ - task:
14835
  type: STS
14836
+ dataset:
 
14837
  name: MTEB STS22 (es-en)
 
 
14838
  type: mteb/sts22-crosslingual-sts
14839
+ config: es-en
14840
+ split: test
14841
+ revision: de9d86b3b84231dc21f76c7b7af1f28e2f57f6e3
14842
  metrics:
14843
  - type: pearson
14844
  value: 62.0575
 
14858
  value: 66.9527
14859
  - type: main_score
14860
  value: 66.9527
14861
+ - task:
14862
  type: STS
14863
+ dataset:
 
14864
  name: MTEB STS22 (pl-en)
 
 
14865
  type: mteb/sts22-crosslingual-sts
14866
+ config: pl-en
14867
+ split: test
14868
+ revision: de9d86b3b84231dc21f76c7b7af1f28e2f57f6e3
14869
  metrics:
14870
  - type: pearson
14871
  value: 68.42439999999999
 
14885
  value: 69.0444
14886
  - type: main_score
14887
  value: 69.0444
14888
+ - task:
14889
  type: STS
14890
+ dataset:
 
14891
  name: MTEB STS22 (zh-en)
 
 
14892
  type: mteb/sts22-crosslingual-sts
14893
+ config: zh-en
14894
+ split: test
14895
+ revision: de9d86b3b84231dc21f76c7b7af1f28e2f57f6e3
14896
  metrics:
14897
  - type: pearson
14898
  value: 34.164699999999996
 
14912
  value: 36.1776
14913
  - type: main_score
14914
  value: 36.1776
14915
+ - task:
14916
  type: STS
14917
+ dataset:
 
14918
  name: MTEB STSBenchmark (default)
 
 
14919
  type: mteb/stsbenchmark-sts
14920
+ config: default
14921
+ split: test
14922
+ revision: b0fddb56ed78048fa8b90373c8a3cfc37b684831
14923
  metrics:
14924
  - type: pearson
14925
  value: 78.0802
 
14939
  value: 78.0444
14940
  - type: main_score
14941
  value: 78.0444
14942
+ - task:
14943
+ type: Reranking
14944
+ dataset:
 
14945
  name: MTEB SciDocsRR (default)
 
 
14946
  type: mteb/scidocs-reranking
14947
+ config: default
14948
+ split: test
14949
+ revision: d3c5e1fc0b855ab6097bf1cda04dd73947d7caab
14950
  metrics:
14951
  - type: map
14952
  value: 86.4489
 
14966
  value: 42.9429
14967
  - type: main_score
14968
  value: 86.4489
14969
+ - task:
14970
+ type: Retrieval
14971
+ dataset:
 
14972
  name: MTEB SciFact (default)
 
 
14973
  type: mteb/scifact
14974
+ config: default
14975
+ split: test
14976
+ revision: 0228b52cf27578f30900b9e5271d331663a030d7
14977
  metrics:
14978
  - type: ndcg_at_1
14979
  value: 59.333000000000006
 
15257
  value: 75.2553
15258
  - type: main_score
15259
  value: 71.27
15260
+ - task:
15261
+ type: PairClassification
15262
+ dataset:
 
15263
  name: MTEB SprintDuplicateQuestions (default)
 
 
15264
  type: mteb/sprintduplicatequestions-pairclassification
15265
+ config: default
15266
+ split: test
15267
+ revision: d66bd1f72af766a5cc4b0ca5e00c162f89e8cc46
15268
  metrics:
15269
  - type: similarity_accuracy
15270
  value: 99.7604
 
15348
  value: 94.17
15349
  - type: main_score
15350
  value: 94.17
15351
+ - task:
15352
+ type: Clustering
15353
+ dataset:
 
15354
  name: MTEB StackExchangeClustering (default)
 
 
15355
  type: mteb/stackexchange-clustering
15356
+ config: default
15357
+ split: test
15358
+ revision: 6cbc1f7b2bc0622f2e39d2c77fa502909748c259
15359
  metrics:
15360
  - type: v_measure
15361
  value: 64.6589
 
15363
  value: 4.734
15364
  - type: main_score
15365
  value: 64.6589
15366
+ - task:
15367
  type: Clustering
15368
+ dataset:
 
15369
  name: MTEB StackExchangeClusteringP2P (default)
 
 
15370
  type: mteb/stackexchange-clustering-p2p
15371
+ config: default
15372
+ split: test
15373
+ revision: 815ca46b2622cec33ccafc3735d572c266efdb44
15374
  metrics:
15375
  - type: v_measure
15376
  value: 32.9388
 
15378
  value: 1.6312
15379
  - type: main_score
15380
  value: 32.9388
15381
+ - task:
15382
+ type: Reranking
15383
+ dataset:
 
15384
  name: MTEB StackOverflowDupQuestions (default)
 
 
15385
  type: mteb/stackoverflowdupquestions-reranking
15386
+ config: default
15387
+ split: test
15388
+ revision: e185fbe320c72810689fc5848eb6114e1ef5ec69
15389
  metrics:
15390
  - type: map
15391
  value: 52.645399999999995
 
15405
  value: 39.409499999999994
15406
  - type: main_score
15407
  value: 52.645399999999995
15408
+ - task:
15409
+ type: Retrieval
15410
+ dataset:
 
15411
  name: MTEB StackOverflowQA (default)
 
 
15412
  type: CoIR-Retrieval/stackoverflow-qa
15413
+ config: default
15414
+ split: test
15415
+ revision: db8f169f3894c14a00251061f957b2063eef2bd5
15416
  metrics:
15417
  - type: ndcg_at_1
15418
  value: 74.97500000000001
 
15696
  value: 82.238
15697
  - type: main_score
15698
  value: 83.92699999999999
15699
+ - task:
15700
+ type: Summarization
15701
+ dataset:
 
15702
  name: MTEB SummEval (default)
 
 
15703
  type: mteb/summeval
15704
+ config: default
15705
+ split: test
15706
+ revision: cda12ad7615edc362dbf25a00fdd61d3b1eaf93c
15707
  metrics:
15708
  - type: pearson
15709
  value: 29.8395
 
15719
  value: 29.8395
15720
  - type: main_score
15721
  value: 29.383
15722
+ - task:
15723
+ type: Retrieval
15724
+ dataset:
 
15725
  name: MTEB SyntheticText2SQL (default)
 
 
15726
  type: CoIR-Retrieval/synthetic-text2sql
15727
+ config: default
15728
+ split: test
15729
+ revision: 686b87296c3a0191b5d9415a00526c62db9fce09
15730
  metrics:
15731
  - type: ndcg_at_1
15732
  value: 4.222
 
16010
  value: -52.9095
16011
  - type: main_score
16012
  value: 44.775
16013
+ - task:
16014
  type: Retrieval
16015
+ dataset:
 
16016
  name: MTEB TRECCOVID (default)
 
 
16017
  type: mteb/trec-covid
16018
+ config: default
16019
+ split: test
16020
+ revision: bb9466bac8153a0349341eb1b22e06409e78ef4e
16021
  metrics:
16022
  - type: ndcg_at_1
16023
  value: 70.0
 
16301
  value: -5.6598999999999995
16302
  - type: main_score
16303
  value: 63.098
16304
+ - task:
16305
  type: Retrieval
16306
+ dataset:
 
16307
  name: MTEB Touche2020 (default)
 
 
16308
  type: mteb/touche2020
16309
+ config: default
16310
+ split: test
16311
+ revision: a34f9a33db75fa0cbb21bb5cfc3dae8dc8bec93f
16312
  metrics:
16313
  - type: ndcg_at_1
16314
  value: 23.469
 
16592
  value: -0.7001000000000001
16593
  - type: main_score
16594
  value: 24.029
16595
+ - task:
16596
+ type: Classification
16597
+ dataset:
 
16598
  name: MTEB ToxicConversationsClassification (default)
 
 
16599
  type: mteb/toxic_conversations_50k
16600
+ config: default
16601
+ split: test
16602
+ revision: edfaf9da55d3dd50d43143d90c1ac476895ae6de
16603
  metrics:
16604
  - type: accuracy
16605
  value: 62.9395
 
16613
  value: 10.306600000000001
16614
  - type: main_score
16615
  value: 62.9395
16616
+ - task:
16617
  type: Classification
16618
+ dataset:
 
16619
  name: MTEB TweetSentimentExtractionClassification (default)
 
 
16620
  type: mteb/tweet_sentiment_extraction
16621
+ config: default
16622
+ split: test
16623
+ revision: d604517c81ca91fe16a244d1248fc021f9ecee7a
16624
  metrics:
16625
  - type: accuracy
16626
  value: 52.8721
 
16630
  value: 52.4319
16631
  - type: main_score
16632
  value: 52.8721
16633
+ - task:
16634
+ type: Clustering
16635
+ dataset:
 
16636
  name: MTEB TwentyNewsgroupsClustering (default)
 
 
16637
  type: mteb/twentynewsgroups-clustering
16638
+ config: default
16639
+ split: test
16640
+ revision: 6125ec4e24fa026cec8a478383ee943acfbd5449
16641
  metrics:
16642
  - type: v_measure
16643
  value: 44.9227
 
16645
  value: 1.1638000000000002
16646
  - type: main_score
16647
  value: 44.9227
16648
+ - task:
16649
+ type: PairClassification
16650
+ dataset:
 
16651
  name: MTEB TwitterSemEval2015 (default)
 
 
16652
  type: mteb/twittersemeval2015-pairclassification
16653
+ config: default
16654
+ split: test
16655
+ revision: 70970daeab8776df92f5ea462b6173c0b46fd2d1
16656
  metrics:
16657
  - type: similarity_accuracy
16658
  value: 82.04090000000001
 
16736
  value: 60.0317
16737
  - type: main_score
16738
  value: 60.0317
16739
+ - task:
16740
  type: PairClassification
16741
+ dataset:
 
16742
  name: MTEB TwitterURLCorpus (default)
 
 
16743
  type: mteb/twitterurlcorpus-pairclassification
16744
+ config: default
16745
+ split: test
16746
+ revision: 8b6510b0b1fa4e4c4f879467980e9be563ec1cdf
16747
  metrics:
16748
  - type: similarity_accuracy
16749
  value: 87.3035
 
16827
  value: 82.5792
16828
  - type: main_score
16829
  value: 82.5792
 
 
 
16830
  ---
16831
  # Granite-Embedding-30m-English
16832
 
onnx/config.json ADDED
@@ -0,0 +1,28 @@
1
+ {
2
+ "_attn_implementation_autoset": true,
3
+ "architectures": [
4
+ "RobertaModel"
5
+ ],
6
+ "attention_probs_dropout_prob": 0.1,
7
+ "bos_token_id": 0,
8
+ "classifier_dropout": null,
9
+ "eos_token_id": 2,
10
+ "export_model_type": "transformer",
11
+ "hidden_act": "gelu",
12
+ "hidden_dropout_prob": 0.1,
13
+ "hidden_size": 384,
14
+ "initializer_range": 0.02,
15
+ "intermediate_size": 1536,
16
+ "layer_norm_eps": 1e-12,
17
+ "max_position_embeddings": 514,
18
+ "model_type": "roberta",
19
+ "num_attention_heads": 12,
20
+ "num_hidden_layers": 6,
21
+ "pad_token_id": 1,
22
+ "position_embedding_type": "absolute",
23
+ "torch_dtype": "float32",
24
+ "transformers_version": "4.51.3",
25
+ "type_vocab_size": 2,
26
+ "use_cache": true,
27
+ "vocab_size": 50265
28
+ }
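The config above describes the same encoder as the original checkpoint: a 6-layer RoBERTa model with a 384-dimensional hidden size, 12 attention heads, and a 50,265-token vocabulary, exported in float32. A minimal sketch of loading this export through `optimum.onnxruntime` is shown below; the `subfolder`/`file_name` arguments pointing at `onnx/model.onnx`, and the availability of `optimum[onnxruntime]`, are assumptions for illustration rather than anything stated in the card.

```python
# Sketch only: load the ONNX export added under onnx/ via optimum + onnxruntime.
# Assumes `pip install optimum[onnxruntime]`; the subfolder/file_name arguments
# are how this example points at onnx/model.onnx and are not taken from the card.
from optimum.onnxruntime import ORTModelForFeatureExtraction
from transformers import AutoTokenizer

model_id = "ibm-granite/granite-embedding-30m-english"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = ORTModelForFeatureExtraction.from_pretrained(
    model_id, subfolder="onnx", file_name="model.onnx"
)

batch = tokenizer(
    ["Granite-Embedding-30m-English is a compact English retrieval model."],
    padding=True,
    truncation=True,
    return_tensors="pt",
)
outputs = model(**batch)
# Token-level hidden states, shape (batch, seq_len, 384) per the config above;
# sentence embeddings still need the pooling described in the model card.
print(outputs.last_hidden_state.shape)
```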
onnx/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
onnx/model.onnx ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b53a40f4b51e0308fc671fa29ad2f068695f748bc7a4f9412fe5dbb1b6509a4c
3
+ size 120769539
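`model.onnx` itself is stored through Git LFS, so only the pointer above appears in the diff. The ~120.8 MB size is consistent with roughly 30M float32 parameters exported without quantization (≈30M × 4 bytes). Before wiring the graph into a pipeline, it can be worth confirming its input and output names; the names in the comments below are the usual ones for a transformers-style export and are assumptions to verify, not something this PR states.

```python
# Sketch only: download the LFS-backed graph and inspect its I/O signature.
# The expected names (input_ids, attention_mask, last_hidden_state) are assumptions
# based on typical transformers/optimum exports; print them to confirm.
import onnxruntime as ort
from huggingface_hub import hf_hub_download

path = hf_hub_download(
    "ibm-granite/granite-embedding-30m-english", filename="onnx/model.onnx"
)
session = ort.InferenceSession(path, providers=["CPUExecutionProvider"])

print([i.name for i in session.get_inputs()])   # e.g. ['input_ids', 'attention_mask']
print([o.name for o in session.get_outputs()])  # e.g. ['last_hidden_state']
```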
onnx/special_tokens_map.json ADDED
@@ -0,0 +1,51 @@
1
+ {
2
+ "bos_token": {
3
+ "content": "<s>",
4
+ "lstrip": false,
5
+ "normalized": true,
6
+ "rstrip": false,
7
+ "single_word": false
8
+ },
9
+ "cls_token": {
10
+ "content": "<s>",
11
+ "lstrip": false,
12
+ "normalized": true,
13
+ "rstrip": false,
14
+ "single_word": false
15
+ },
16
+ "eos_token": {
17
+ "content": "</s>",
18
+ "lstrip": false,
19
+ "normalized": true,
20
+ "rstrip": false,
21
+ "single_word": false
22
+ },
23
+ "mask_token": {
24
+ "content": "<mask>",
25
+ "lstrip": true,
26
+ "normalized": true,
27
+ "rstrip": false,
28
+ "single_word": false
29
+ },
30
+ "pad_token": {
31
+ "content": "<pad>",
32
+ "lstrip": false,
33
+ "normalized": true,
34
+ "rstrip": false,
35
+ "single_word": false
36
+ },
37
+ "sep_token": {
38
+ "content": "</s>",
39
+ "lstrip": false,
40
+ "normalized": true,
41
+ "rstrip": false,
42
+ "single_word": false
43
+ },
44
+ "unk_token": {
45
+ "content": "<unk>",
46
+ "lstrip": false,
47
+ "normalized": true,
48
+ "rstrip": false,
49
+ "single_word": false
50
+ }
51
+ }
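The special-token set above is the standard RoBERTa one, with `<s>` doubling as the CLS token and `</s>` as SEP. When the ONNX graph is driven directly (rather than through `optimum` or `sentence-transformers`), sentence embeddings still have to be pooled from the token-level output. The sketch below uses CLS pooling purely as an illustration; whether CLS or mean pooling matches the released model should be confirmed against the model card and its pooling configuration rather than taken from this example.

```python
# Sketch only: pool token-level ONNX outputs into one sentence vector.
# CLS pooling (position 0, the <s> token) is an illustrative assumption;
# confirm the intended pooling against the model card before relying on it.
import numpy as np

def cls_pool(last_hidden_state: np.ndarray) -> np.ndarray:
    """(batch, seq_len, hidden) -> (batch, hidden), L2-normalised."""
    vec = last_hidden_state[:, 0, :]  # the <s> / CLS position
    return vec / np.linalg.norm(vec, axis=1, keepdims=True)

# Dummy stand-in for a real session.run(...) output with hidden_size 384.
dummy = np.random.rand(2, 16, 384).astype(np.float32)
print(cls_pool(dummy).shape)  # (2, 384)
```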
onnx/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
onnx/tokenizer_config.json ADDED
@@ -0,0 +1,59 @@
1
+ {
2
+ "add_prefix_space": false,
3
+ "added_tokens_decoder": {
4
+ "0": {
5
+ "content": "<s>",
6
+ "lstrip": false,
7
+ "normalized": true,
8
+ "rstrip": false,
9
+ "single_word": false,
10
+ "special": true
11
+ },
12
+ "1": {
13
+ "content": "<pad>",
14
+ "lstrip": false,
15
+ "normalized": true,
16
+ "rstrip": false,
17
+ "single_word": false,
18
+ "special": true
19
+ },
20
+ "2": {
21
+ "content": "</s>",
22
+ "lstrip": false,
23
+ "normalized": true,
24
+ "rstrip": false,
25
+ "single_word": false,
26
+ "special": true
27
+ },
28
+ "3": {
29
+ "content": "<unk>",
30
+ "lstrip": false,
31
+ "normalized": true,
32
+ "rstrip": false,
33
+ "single_word": false,
34
+ "special": true
35
+ },
36
+ "50264": {
37
+ "content": "<mask>",
38
+ "lstrip": true,
39
+ "normalized": true,
40
+ "rstrip": false,
41
+ "single_word": false,
42
+ "special": true
43
+ }
44
+ },
45
+ "additional_special_tokens": [],
46
+ "bos_token": "<s>",
47
+ "clean_up_tokenization_spaces": true,
48
+ "cls_token": "<s>",
49
+ "eos_token": "</s>",
50
+ "errors": "replace",
51
+ "extra_special_tokens": {},
52
+ "mask_token": "<mask>",
53
+ "model_max_length": 512,
54
+ "pad_token": "<pad>",
55
+ "sep_token": "</s>",
56
+ "tokenizer_class": "RobertaTokenizer",
57
+ "trim_offsets": true,
58
+ "unk_token": "<unk>"
59
+ }
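The tokenizer config mirrors the repository's root tokenizer (RoBERTa BPE, `model_max_length` of 512), so the `onnx/` folder is self-contained. Recent `sentence-transformers` releases (3.2 and later) can also consume ONNX weights directly; whether this exact `onnx/model.onnx` layout is resolved automatically is an assumption, and the `model_kwargs` hint below is only one way of pointing the backend at a specific file.

```python
# Sketch only: drop-in usage through the sentence-transformers ONNX backend (>= 3.2).
# backend="onnx" and the file_name hint are assumptions about how this repo layout
# resolves; they are not instructions taken from the model card.
from sentence_transformers import SentenceTransformer

model = SentenceTransformer(
    "ibm-granite/granite-embedding-30m-english",
    backend="onnx",
    model_kwargs={"file_name": "onnx/model.onnx"},
)
embeddings = model.encode(["ONNX export of the 30M Granite English embedding model."])
print(embeddings.shape)  # (1, 384)
```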
onnx/vocab.json ADDED
The diff for this file is too large to render. See raw diff