yxc97 committed
Commit 62a2f1c · verified · 1 Parent(s): d42a00c

Upload folder using huggingface_hub

This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50)
  1. .gitattributes +16 -0
  2. README.md +67 -0
  3. examples/AutoCls2D_Cifar100/Baseline/experiment.py +217 -0
  4. examples/AutoCls2D_Cifar100/Baseline/final_info.json +1 -0
  5. examples/AutoCls2D_Cifar100/Baseline/launcher.sh +7 -0
  6. examples/AutoCls2D_Cifar100/HARCNet/experiment.py +326 -0
  7. examples/AutoCls2D_Cifar100/HARCNet/harcnet.py +193 -0
  8. examples/AutoCls2D_Cifar100/HARCNet/idea.json +7 -0
  9. examples/AutoCls2D_Cifar100/HARCNet/launcher.sh +6 -0
  10. examples/AutoCls2D_Cifar100/HARCNet/res/best.pth +3 -0
  11. examples/AutoCls2D_Cifar100/HARCNet/res/final_info.json +1 -0
  12. examples/AutoCls3D_ModelNet40/Baseline/data_transforms.py +37 -0
  13. examples/AutoCls3D_ModelNet40/Baseline/experiment.py +430 -0
  14. examples/AutoCls3D_ModelNet40/Baseline/final_info.json +9 -0
  15. examples/AutoCls3D_ModelNet40/Baseline/launcher.sh +5 -0
  16. examples/AutoCls3D_ModelNet40/Baseline/metrics.py +311 -0
  17. examples/AutoCls3D_ModelNet40/HIRE-Net/data_transforms.py +37 -0
  18. examples/AutoCls3D_ModelNet40/HIRE-Net/experiment.py +565 -0
  19. examples/AutoCls3D_ModelNet40/HIRE-Net/idea.json +7 -0
  20. examples/AutoCls3D_ModelNet40/HIRE-Net/launcher.sh +5 -0
  21. examples/AutoCls3D_ModelNet40/HIRE-Net/metrics.py +311 -0
  22. examples/AutoCls3D_ModelNet40/HIRE-Net/res/best.pth +3 -0
  23. examples/AutoCls3D_ModelNet40/HIRE-Net/res/final_info.json +1 -0
  24. examples/AutoClsSST_SST-2/Baseline/experiment.py +490 -0
  25. examples/AutoClsSST_SST-2/Baseline/final_info.json +1 -0
  26. examples/AutoClsSST_SST-2/Baseline/launcher.sh +1 -0
  27. examples/AutoClsSST_SST-2/Transformer-Hybrid-Augmentation-Sentiment/experiment.py +744 -0
  28. examples/AutoClsSST_SST-2/Transformer-Hybrid-Augmentation-Sentiment/idea.json +7 -0
  29. examples/AutoClsSST_SST-2/Transformer-Hybrid-Augmentation-Sentiment/launcher.sh +1 -0
  30. examples/AutoClsSST_SST-2/Transformer-Hybrid-Augmentation-Sentiment/psycholinguistic_utils.py +472 -0
  31. examples/AutoClsSST_SST-2/Transformer-Hybrid-Augmentation-Sentiment/res/config.json +23 -0
  32. examples/AutoClsSST_SST-2/Transformer-Hybrid-Augmentation-Sentiment/res/final_info.json +9 -0
  33. examples/AutoClsSST_SST-2/Transformer-Hybrid-Augmentation-Sentiment/res/output/adversarial_results_epoch_1.json +1 -0
  34. examples/AutoClsSST_SST-2/Transformer-Hybrid-Augmentation-Sentiment/res/output/adversarial_results_epoch_2.json +1 -0
  35. examples/AutoClsSST_SST-2/Transformer-Hybrid-Augmentation-Sentiment/res/output/adversarial_results_epoch_3.json +1 -0
  36. examples/AutoClsSST_SST-2/Transformer-Hybrid-Augmentation-Sentiment/res/output/best.pth.tar +3 -0
  37. examples/AutoClsSST_SST-2/Transformer-Hybrid-Augmentation-Sentiment/res/output/test_prediction_epoch_1.csv +1822 -0
  38. examples/AutoClsSST_SST-2/Transformer-Hybrid-Augmentation-Sentiment/res/output/test_prediction_epoch_2.csv +1822 -0
  39. examples/AutoClsSST_SST-2/Transformer-Hybrid-Augmentation-Sentiment/res/output/test_prediction_epoch_3.csv +1822 -0
  40. examples/AutoEAP_UMI-STARR-seq/Baseline/config/config-conv-117.json +22 -0
  41. examples/AutoEAP_UMI-STARR-seq/Baseline/experiment.py +206 -0
  42. examples/AutoEAP_UMI-STARR-seq/Baseline/final_info.json +8 -0
  43. examples/AutoEAP_UMI-STARR-seq/Baseline/launcher.sh +1 -0
  44. examples/AutoEAP_UMI-STARR-seq/HyenaMSTA+/config/config-conv-117.json +22 -0
  45. examples/AutoEAP_UMI-STARR-seq/HyenaMSTA+/experiment.py +241 -0
  46. examples/AutoEAP_UMI-STARR-seq/HyenaMSTA+/hyenamsta_model.py +358 -0
  47. examples/AutoEAP_UMI-STARR-seq/HyenaMSTA+/idea.json +7 -0
  48. examples/AutoEAP_UMI-STARR-seq/HyenaMSTA+/launcher.sh +1 -0
  49. examples/AutoEAP_UMI-STARR-seq/HyenaMSTA+/res/final_info.json +8 -0
  50. examples/AutoEAP_UMI-STARR-seq/HyenaMSTA+/res/hyenamsta_plus.h5 +3 -0
.gitattributes CHANGED
@@ -33,3 +33,19 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ examples/AutoPCDet_Once/Baseline/pcdet/ops/bev_pool/bev_pool_ext.cpython-39-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+ examples/AutoPCDet_Once/Baseline/pcdet/ops/ingroup_inds/ingroup_inds_cuda.cpython-39-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+ examples/AutoPCDet_Once/Baseline/pcdet/ops/iou3d_nms/iou3d_nms_cuda.cpython-39-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+ examples/AutoPCDet_Once/Baseline/pcdet/ops/pointnet2/pointnet2_batch/pointnet2_batch_cuda.cpython-39-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+ examples/AutoPCDet_Once/Baseline/pcdet/ops/pointnet2/pointnet2_stack/pointnet2_stack_cuda.cpython-39-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+ examples/AutoPCDet_Once/Baseline/pcdet/ops/roiaware_pool3d/roiaware_pool3d_cuda.cpython-39-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+ examples/AutoPCDet_Once/Baseline/pcdet/ops/roipoint_pool3d/roipoint_pool3d_cuda.cpython-39-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+ examples/AutoPCDet_Once/SARA3D/pcdet/ops/bev_pool/bev_pool_ext.cpython-39-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+ examples/AutoPCDet_Once/SARA3D/pcdet/ops/ingroup_inds/ingroup_inds_cuda.cpython-39-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+ examples/AutoPCDet_Once/SARA3D/pcdet/ops/iou3d_nms/iou3d_nms_cuda.cpython-39-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+ examples/AutoPCDet_Once/SARA3D/pcdet/ops/pointnet2/pointnet2_batch/pointnet2_batch_cuda.cpython-39-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+ examples/AutoPCDet_Once/SARA3D/pcdet/ops/pointnet2/pointnet2_stack/pointnet2_stack_cuda.cpython-39-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+ examples/AutoPCDet_Once/SARA3D/pcdet/ops/roiaware_pool3d/roiaware_pool3d_cuda.cpython-39-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+ examples/AutoPCDet_Once/SARA3D/pcdet/ops/roipoint_pool3d/roipoint_pool3d_cuda.cpython-39-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+ images/framework.png filter=lfs diff=lfs merge=lfs -text
+ images/novelseek.png filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,67 @@
+ # NovelSeek - When Agent Becomes the Scientist – Building Closed-Loop System from Hypothesis to Verification
+
+ [[ Paper 📓 ]](https://github.com/Alpha-Innovator/NovelSeek) [[ Website 🏠 ]](https://github.com/Alpha-Innovator/NovelSeek) [[ NovelSeek Examples 🤗 ]](https://huggingface.co/U4R/NovelSeek)
+
+ <i>
+ From One Idea to Autonomous Experimentation
+ </i>
+
+ ## 📖 Overview
+
+ ![NovelSeek](/images/novelseek.png)
+
+ NovelSeek supports **12** types of scientific research tasks, ranging from AI to broader scientific fields, including reaction yield prediction, molecular dynamics, power flow estimation, time series forecasting, transcription prediction, enhancer activity prediction, sentiment classification, 2D image classification, 3D point classification, 2D semantic segmentation, 3D autonomous driving, and large vision-language model fine-tuning.
+
+ ## 🌟 Core Features
+
+ ![Framework](/images/framework.png)
+
+ NovelSeek provides three main capabilities: (1) **self-evolving idea generation with human-interactive feedback**, (2) **idea-to-methodology construction**, and (3) **evolutionary experimental planning and execution**. NovelSeek is a unified, closed-loop multi-agent system designed to automate and accelerate innovative research across scientific domains. Through intelligent agent collaboration, NovelSeek enables **end-to-end automation** from idea generation and methodology construction to experimental execution, dramatically enhancing research efficiency and creativity.
+
+ ### 💡 Self-Evolving Idea Generation with Human-Interactive Feedback
+ - Autonomously generates, selects, and evolves innovative research ideas through multi-agent collaboration
+ - Supports interactive human feedback, enabling continuous refinement of ideas with expert insights
+ - Dynamically integrates literature, code, and domain knowledge to inspire diverse innovation pathways
+
+ ### 🏗️ Idea-to-Methodology Construction
+ - Systematically transforms creative ideas into actionable, verifiable research methodologies
+ - Integrates baseline code, literature, and expert knowledge to automatically generate comprehensive methodological frameworks
+ - Supports iterative refinement and traceability of research methods
+
+ ### 🛠️ Evolutionary Experimental Planning and Execution
+ - Automates complex experimental workflow planning, code implementation, and debugging
+ - Employs exception-guided intelligent debugging to automatically identify and resolve code issues
+ - Enables adaptive evolution and continuous optimization of experimental plans
+
+ ### 🤖 Multi-Agent Orchestration
+ - Coordinates specialized agents such as the Survey, Coding, Idea Innovation, and Assessment Agents
+ - Manages data flow, task scheduling, and human interaction points for efficient and coherent research processes
+ - Supports extensibility and compatibility with diverse scientific tasks
+
+ ---
+
+ **NovelSeek** delivers end-to-end algorithmic innovation, empowering AI+X researchers to rapidly complete the full research loop—from idea to methodology to experimental validation—accelerating scientific discovery and breakthroughs.
+
+ ## 🔬 Supported Research Tasks
+
+ - Suzuki Yield Prediction
+ - Molecular Dynamics Simulation
+ - Enhancer Activity Prediction
+ - Transcription Prediction for Perturbation Response
+ - Power Flow Estimation
+ - Time Series Forecasting
+ - Semantic Segmentation
+ - Image Classification
+ - Sentiment Analysis
+ - Point Cloud Classification
+ - Point Cloud Object Detection
+ - VLM & LLM Fine-tuning
+ - ...
+
+ ## 🚀 Performance
+
+ By leveraging multi-source knowledge injection, NovelSeek intelligently generates and verifies research ideas across multiple domains. The system has significantly improved research efficiency in Suzuki Yield Prediction, Enhancer Activity Prediction, and Transcription Prediction for Perturbation Response, among other tasks.
examples/AutoCls2D_Cifar100/Baseline/experiment.py ADDED
@@ -0,0 +1,217 @@
+ import os
+ import json
+ import time
+ import argparse
+ import pathlib
+ from tqdm import tqdm
+ import matplotlib.pyplot as plt
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ from torchvision import datasets
+ from torch.utils.data import DataLoader
+ import torchvision.transforms as transforms
+ from torch.optim.lr_scheduler import _LRScheduler
+ import traceback
+
+ CIFAR100_TRAIN_MEAN = (0.5070751592371323, 0.48654887331495095, 0.4409178433670343)
+ CIFAR100_TRAIN_STD = (0.2673342858792401, 0.2564384629170883, 0.27615047132568404)
+ MILESTONES = [60, 120, 160]
+
+
+ class WideBasicBlock(nn.Module):
+     def __init__(self, in_planes, out_planes, dropout_rate, stride=1):
+         super(WideBasicBlock, self).__init__()
+         self.bn1 = nn.BatchNorm2d(in_planes)
+         self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)
+         self.dropout = nn.Dropout(p=dropout_rate)
+         self.bn2 = nn.BatchNorm2d(out_planes)
+         self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1, padding=1, bias=False)
+         self.relu = nn.ReLU(inplace=True)
+
+         if in_planes != out_planes:
+             self.shortcut = nn.Conv2d(
+                 in_planes,
+                 out_planes,
+                 kernel_size=1,
+                 stride=stride,
+                 padding=0,
+                 bias=False,
+             )
+         else:
+             self.shortcut = nn.Identity()
+
+     def forward(self, x):
+         out = self.relu(self.bn1(x))
+         skip_x = x if isinstance(self.shortcut, nn.Identity) else out
+
+         out = self.conv1(out)
+         out = self.relu(self.bn2(out))
+         out = self.dropout(out)
+         out = self.conv2(out)
+         out += self.shortcut(skip_x)
+
+         return out
+
+
+ class WideResNet(nn.Module):
+     def __init__(self, depth, widen_factor, num_classes, dropout_rate):
+         super(WideResNet, self).__init__()
+
+         assert (depth - 4) % 6 == 0, "Wide-resnet depth should be 6n+4"
+         n = (depth - 4) / 6
+
+         n_stages = [16, 16 * widen_factor, 32 * widen_factor, 64 * widen_factor]
+
+         self.conv1 = nn.Conv2d(3, n_stages[0], kernel_size=3, stride=1, padding=1, bias=False)
+         self.stage1 = self._make_wide_stage(WideBasicBlock, n_stages[0], n_stages[1], n, dropout_rate, stride=1)
+         self.stage2 = self._make_wide_stage(WideBasicBlock, n_stages[1], n_stages[2], n, dropout_rate, stride=2)
+         self.stage3 = self._make_wide_stage(WideBasicBlock, n_stages[2], n_stages[3], n, dropout_rate, stride=2)
+         self.bn1 = nn.BatchNorm2d(n_stages[3])
+         self.relu = nn.ReLU(inplace=True)
+         self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
+         self.linear = nn.Linear(n_stages[3], num_classes)
+
+         self._init_params()
+
+     @staticmethod
+     def _make_wide_stage(block, in_planes, out_planes, num_blocks, dropout_rate, stride):
+         stride_list = [stride] + [1] * (int(num_blocks) - 1)
+         in_planes_list = [in_planes] + [out_planes] * (int(num_blocks) - 1)
+         blocks = []
+
+         for _in_planes, _stride in zip(in_planes_list, stride_list):
+             blocks.append(block(_in_planes, out_planes, dropout_rate, _stride))
+
+         return nn.Sequential(*blocks)
+
+     def _init_params(self):
+         for m in self.modules():
+             if isinstance(m, nn.Conv2d):
+                 nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
+             elif isinstance(m, nn.BatchNorm2d):
+                 if m.affine:
+                     m.weight.data.fill_(1)
+                     m.bias.data.zero_()
+             elif isinstance(m, nn.Linear):
+                 if m.bias is not None:
+                     m.bias.data.zero_()
+
+     def forward(self, x):
+         out = self.conv1(x)
+         out = self.stage1(out)
+         out = self.stage2(out)
+         out = self.stage3(out)
+         out = self.relu(self.bn1(out))
+         out = self.avg_pool(out)
+         out = out.view(out.size(0), -1)
+         out = self.linear(out)
+
+         return out
+
+
+ def wide_resnet_28_10_old():
+     return WideResNet(
+         depth=28,
+         widen_factor=10,
+         num_classes=100,
+         dropout_rate=0.0,
+     )
+
+
+ if __name__ == "__main__":
+     parser = argparse.ArgumentParser()
+     parser.add_argument("--batch_size", type=int, default=128)
+     parser.add_argument("--num_workers", type=int, default=4)
+     parser.add_argument("--out_dir", type=str, default="run_1")
+     parser.add_argument("--in_channels", type=int, default=3)
+     parser.add_argument("--data_root", type=str, default='./datasets/cifar100/')
+     parser.add_argument("--learning_rate", type=float, default=0.1)
+     parser.add_argument("--max_epoch", type=int, default=200)
+     parser.add_argument("--val_per_epoch", type=int, default=5)
+     config = parser.parse_args()
+
+
+     try:
+         final_infos = {}
+         all_results = {}
+
+         pathlib.Path(config.out_dir).mkdir(parents=True, exist_ok=True)
+
+         model = wide_resnet_28_10_old().cuda()
+         transform_train = transforms.Compose([
+             transforms.ToTensor(),
+             transforms.Lambda(lambda x: F.pad(x.unsqueeze(0),
+                                               (4, 4, 4, 4), mode='reflect').squeeze()),
+             transforms.ToPILImage(),
+             transforms.RandomCrop(32),
+             transforms.RandomHorizontalFlip(),
+             transforms.ToTensor(),
+             transforms.Normalize(CIFAR100_TRAIN_MEAN, CIFAR100_TRAIN_STD),
+         ])
+
+         transform_test = transforms.Compose([
+             transforms.ToTensor(),
+             transforms.Normalize(CIFAR100_TRAIN_MEAN, CIFAR100_TRAIN_STD)
+         ])
+         train_dataset = datasets.CIFAR100(root=config.data_root, train=True,
+                                           download=True, transform=transform_train)
+         test_dataset = datasets.CIFAR100(root=config.data_root, train=False,
+                                          download=True, transform=transform_test)
+         train_loader = DataLoader(train_dataset, shuffle=True, num_workers=config.num_workers, batch_size=config.batch_size)
+         test_loader = DataLoader(test_dataset, shuffle=True, num_workers=config.num_workers, batch_size=config.batch_size)
+
+         criterion = nn.CrossEntropyLoss().cuda()
+         optimizer = torch.optim.SGD(model.parameters(), lr=config.learning_rate, momentum=0.9, weight_decay=5e-4,
+                                     nesterov=True)
+         scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, len(train_loader) * config.max_epoch)
+
+         best_acc = 0.0
+         start_time = time.time()
+         for cur_epoch in tqdm(range(1, config.max_epoch + 1)):
+             model.train()
+             for batch_idx, (images, labels) in enumerate(tqdm(train_loader)):
+                 images, labels = images.cuda(), labels.cuda()
+                 optimizer.zero_grad()
+                 outputs = model(images)
+                 loss = criterion(outputs, labels)
+                 loss.backward()
+                 optimizer.step()
+                 scheduler.step()
+
+             print(f'Finished epoch {cur_epoch} training.')
+
+             if (cur_epoch % config.val_per_epoch == 0 and cur_epoch != 0) or cur_epoch == (config.max_epoch - 1):
+                 model.eval()
+                 correct = 0.0
+                 for images, labels in tqdm(test_loader):
+                     images, labels = images.cuda(), labels.cuda()
+                     with torch.no_grad():
+                         outputs = model(images)
+
+                     _, preds = outputs.max(1)
+                     correct += preds.eq(labels).sum()
+                 cur_acc = correct.float() / len(test_loader.dataset)
+                 print(f"Epoch: {cur_epoch}, Accuracy: {cur_acc}")
+
+                 if cur_acc > best_acc:
+                     best_acc = cur_acc
+                     best_epoch = cur_epoch
+                     torch.save(model.state_dict(), os.path.join(config.out_dir, 'best.pth'))
+
+         final_infos = {
+             "cifar100": {
+                 "means": {
+                     "best_acc": best_acc.item(),
+                     "epoch": best_epoch
+                 }
+             }
+         }
+
+         with open(os.path.join(config.out_dir, "final_info.json"), "w") as f:
+             json.dump(final_infos, f)
+
+     except Exception as e:
+         print("Original error in subprocess:", flush=True)
+         traceback.print_exc(file=open(os.path.join(config.out_dir, "traceback.log"), "w"))
+         raise
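
For reference, the `assert` in `WideResNet` encodes the standard Wide-ResNet sizing depth = 6n + 4 (three stages of n blocks, two 3×3 convolutions per block, plus four fixed layers). A minimal sketch checking the arithmetic for the WRN-28-10 built by `wide_resnet_28_10_old` (illustrative only, not part of the uploaded files):

```python
# Illustrative sizing check for the WideResNet above (not part of the upload).
depth, widen_factor = 28, 10
assert (depth - 4) % 6 == 0          # depth = 6n + 4
n = (depth - 4) // 6                 # blocks per stage -> 4
n_stages = [16, 16 * widen_factor, 32 * widen_factor, 64 * widen_factor]
print(n, n_stages)                   # 4 [16, 160, 320, 640]
```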
examples/AutoCls2D_Cifar100/Baseline/final_info.json ADDED
@@ -0,0 +1 @@
+ {"cifar100": {"means": {"best_acc": 0.8120, "epoch": 190}}}
examples/AutoCls2D_Cifar100/Baseline/launcher.sh ADDED
@@ -0,0 +1,7 @@
+ python experiment.py \
+ --num_workers 4 \
+ --out_dir run_1 \
+ --in_channels 3 \
+ --data_root ./datasets/cifar100/ \
+ --max_epoch 200 \
+ --val_per_epoch 5
examples/AutoCls2D_Cifar100/HARCNet/experiment.py ADDED
@@ -0,0 +1,326 @@
+ import os
+ import json
+ import time
+ import argparse
+ import pathlib
+ from tqdm import tqdm
+ import matplotlib.pyplot as plt
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ from torchvision import datasets
+ from torch.utils.data import DataLoader
+ import torchvision.transforms as transforms
+ from torch.optim.lr_scheduler import _LRScheduler
+ import traceback
+ import numpy as np
+ from harcnet import AdaptiveAugmentation, TemporalConsistencyRegularization
+
+ CIFAR100_TRAIN_MEAN = (0.5070751592371323, 0.48654887331495095, 0.4409178433670343)
+ CIFAR100_TRAIN_STD = (0.2673342858792401, 0.2564384629170883, 0.27615047132568404)
+ MILESTONES = [60, 120, 160]
+
+
+ class WideBasicBlock(nn.Module):
+     def __init__(self, in_planes, out_planes, dropout_rate, stride=1):
+         super(WideBasicBlock, self).__init__()
+         self.bn1 = nn.BatchNorm2d(in_planes)
+         self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)
+         self.dropout = nn.Dropout(p=dropout_rate)
+         self.bn2 = nn.BatchNorm2d(out_planes)
+         self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1, padding=1, bias=False)
+         self.relu = nn.ReLU(inplace=True)
+
+         if in_planes != out_planes:
+             self.shortcut = nn.Conv2d(
+                 in_planes,
+                 out_planes,
+                 kernel_size=1,
+                 stride=stride,
+                 padding=0,
+                 bias=False,
+             )
+         else:
+             self.shortcut = nn.Identity()
+
+     def forward(self, x):
+         out = self.relu(self.bn1(x))
+         skip_x = x if isinstance(self.shortcut, nn.Identity) else out
+
+         out = self.conv1(out)
+         out = self.relu(self.bn2(out))
+         out = self.dropout(out)
+         out = self.conv2(out)
+         out += self.shortcut(skip_x)
+
+         return out
+
+
+ class WideResNet(nn.Module):
+     def __init__(self, depth, widen_factor, num_classes, dropout_rate):
+         super(WideResNet, self).__init__()
+
+         assert (depth - 4) % 6 == 0, "Wide-resnet depth should be 6n+4"
+         n = (depth - 4) / 6
+
+         n_stages = [16, 16 * widen_factor, 32 * widen_factor, 64 * widen_factor]
+
+         self.conv1 = nn.Conv2d(3, n_stages[0], kernel_size=3, stride=1, padding=1, bias=False)
+         self.stage1 = self._make_wide_stage(WideBasicBlock, n_stages[0], n_stages[1], n, dropout_rate, stride=1)
+         self.stage2 = self._make_wide_stage(WideBasicBlock, n_stages[1], n_stages[2], n, dropout_rate, stride=2)
+         self.stage3 = self._make_wide_stage(WideBasicBlock, n_stages[2], n_stages[3], n, dropout_rate, stride=2)
+         self.bn1 = nn.BatchNorm2d(n_stages[3])
+         self.relu = nn.ReLU(inplace=True)
+         self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
+         self.linear = nn.Linear(n_stages[3], num_classes)
+
+         self._init_params()
+
+     @staticmethod
+     def _make_wide_stage(block, in_planes, out_planes, num_blocks, dropout_rate, stride):
+         stride_list = [stride] + [1] * (int(num_blocks) - 1)
+         in_planes_list = [in_planes] + [out_planes] * (int(num_blocks) - 1)
+         blocks = []
+
+         for _in_planes, _stride in zip(in_planes_list, stride_list):
+             blocks.append(block(_in_planes, out_planes, dropout_rate, _stride))
+
+         return nn.Sequential(*blocks)
+
+     def _init_params(self):
+         for m in self.modules():
+             if isinstance(m, nn.Conv2d):
+                 nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
+             elif isinstance(m, nn.BatchNorm2d):
+                 if m.affine:
+                     m.weight.data.fill_(1)
+                     m.bias.data.zero_()
+             elif isinstance(m, nn.Linear):
+                 if m.bias is not None:
+                     m.bias.data.zero_()
+
+     def forward(self, x):
+         out = self.conv1(x)
+         out = self.stage1(out)
+         out = self.stage2(out)
+         out = self.stage3(out)
+         out = self.relu(self.bn1(out))
+         out = self.avg_pool(out)
+         out = out.view(out.size(0), -1)
+         out = self.linear(out)
+
+         return out
+
+
+ def wide_resnet_28_10_old():
+     return WideResNet(
+         depth=28,
+         widen_factor=10,
+         num_classes=100,
+         dropout_rate=0.0,
+     )
+
+
+ if __name__ == "__main__":
+     parser = argparse.ArgumentParser()
+     parser.add_argument("--batch_size", type=int, default=128)
+     parser.add_argument("--num_workers", type=int, default=4)
+     parser.add_argument("--out_dir", type=str, default="run_5")
+     parser.add_argument("--in_channels", type=int, default=3)
+     parser.add_argument("--data_root", type=str, default='./datasets/imagenet')
+     parser.add_argument("--learning_rate", type=float, default=0.1)
+     parser.add_argument("--max_epoch", type=int, default=200)
+     parser.add_argument("--val_per_epoch", type=int, default=5)
+     # HARCNet parameters
+     parser.add_argument("--alpha", type=float, default=0.6, help="Weight for variance in adaptive augmentation")
+     parser.add_argument("--beta", type=float, default=0.6, help="Weight for entropy in adaptive augmentation")
+     parser.add_argument("--gamma", type=float, default=2.2, help="Scaling factor for MixUp interpolation")
+     parser.add_argument("--memory_size", type=int, default=5, help="Number of past predictions to store")
+     parser.add_argument("--decay_rate", type=float, default=2.0, help="Decay rate for temporal consistency")
+     parser.add_argument("--consistency_weight", type=float, default=0.05, help="Weight for consistency loss")
+     parser.add_argument("--auxiliary_weight", type=float, default=0.05, help="Weight for auxiliary loss")
+     parser.add_argument("--use_adaptive_aug", type=bool, default=True, help="Use adaptive augmentation")
+     parser.add_argument("--use_temporal_consistency", type=bool, default=True, help="Use temporal consistency")
+     config = parser.parse_args()
+
+
+     try:
+         final_infos = {}
+         all_results = {}
+
+         pathlib.Path(config.out_dir).mkdir(parents=True, exist_ok=True)
+
+         model = wide_resnet_28_10_old().cuda()
+
+         # Initialize HARCNet components
+         adaptive_aug = AdaptiveAugmentation(
+             alpha=config.alpha,
+             beta=config.beta,
+             gamma=config.gamma
+         )
+
+         temporal_consistency = TemporalConsistencyRegularization(
+             memory_size=config.memory_size,
+             decay_rate=config.decay_rate,
+             consistency_weight=config.consistency_weight
+         )
+
+         transform_train = transforms.Compose([
+             transforms.ToTensor(),
+             transforms.Lambda(lambda x: F.pad(x.unsqueeze(0),
+                                               (4, 4, 4, 4), mode='reflect').squeeze()),
+             transforms.ToPILImage(),
+             transforms.RandomCrop(32),
+             transforms.RandomHorizontalFlip(),
+             transforms.ToTensor(),
+             transforms.Normalize(CIFAR100_TRAIN_MEAN, CIFAR100_TRAIN_STD),
+         ])
+
+         transform_test = transforms.Compose([
+             transforms.ToTensor(),
+             transforms.Normalize(CIFAR100_TRAIN_MEAN, CIFAR100_TRAIN_STD)
+         ])
+
+         train_dataset = datasets.CIFAR100(root=config.data_root, train=True,
+                                           download=True, transform=transform_train)
+         test_dataset = datasets.CIFAR100(root=config.data_root, train=False,
+                                          download=True, transform=transform_test)
+
+         # Create a dataset wrapper that provides sample indices
+         class IndexedDataset(torch.utils.data.Dataset):
+             def __init__(self, dataset):
+                 self.dataset = dataset
+
+             def __getitem__(self, index):
+                 data, target = self.dataset[index]
+                 return data, target, index
+
+             def __len__(self):
+                 return len(self.dataset)
+
+         indexed_train_dataset = IndexedDataset(train_dataset)
+
+         train_loader = DataLoader(indexed_train_dataset, shuffle=True, num_workers=config.num_workers, batch_size=config.batch_size)
+         test_loader = DataLoader(test_dataset, shuffle=False, num_workers=config.num_workers, batch_size=config.batch_size)
+
+         criterion = nn.CrossEntropyLoss().cuda()
+         optimizer = torch.optim.SGD(model.parameters(), lr=config.learning_rate, momentum=0.9, weight_decay=5e-4,
+                                     nesterov=True)
+         scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, len(train_loader) * config.max_epoch)
+
+         best_acc = 0.0
+         start_time = time.time()
+         for cur_epoch in tqdm(range(1, config.max_epoch + 1)):
+             model.train()
+             epoch_loss = 0.0
+             epoch_cls_loss = 0.0
+             epoch_consistency_loss = 0.0
+
+             for batch_idx, (images, labels, indices) in enumerate(tqdm(train_loader)):
+                 images, labels, indices = images.cuda(), labels.cuda(), indices.cuda()
+
+                 # Apply adaptive augmentation if enabled
+                 if config.use_adaptive_aug:
+                     # First forward pass to get predictions for adaptive augmentation
+                     with torch.no_grad():
+                         initial_outputs = model(images)
+                         initial_probs = F.softmax(initial_outputs, dim=1)
+
+                     # Apply MixUp with adaptive coefficient
+                     if np.random.rand() < 0.5:  # Apply MixUp with 50% probability
+                         mixed_images, labels_a, labels_b, lam = adaptive_aug.apply_mixup(images, labels, num_classes=100)
+                         images = mixed_images
+
+                         # Forward pass with mixed images
+                         outputs = model(images)
+
+                         # MixUp loss
+                         cls_loss = lam * criterion(outputs, labels_a) + (1 - lam) * criterion(outputs, labels_b)
+                     else:
+                         # Forward pass without MixUp
+                         outputs = model(images)
+                         cls_loss = criterion(outputs, labels)
+                 else:
+                     # Standard forward pass without adaptive augmentation
+                     outputs = model(images)
+                     cls_loss = criterion(outputs, labels)
+
+                 # Compute consistency loss if enabled
+                 consistency_loss = torch.tensor(0.0).cuda()
+                 if config.use_temporal_consistency:
+                     # Get softmax probabilities
+                     probs = F.softmax(outputs, dim=1)
+
+                     # Update prediction history
+                     temporal_consistency.update_history(indices, probs)
+
+                     # Compute consistency loss
+                     consistency_loss = temporal_consistency.compute_consistency_loss(probs, indices)
+
+                 # Total loss
+                 loss = cls_loss + config.consistency_weight * consistency_loss
+
+                 # Backward and optimize
+                 optimizer.zero_grad()
+                 loss.backward()
+                 optimizer.step()
+                 scheduler.step()
+
+                 # Track losses
+                 epoch_loss += loss.item()
+                 epoch_cls_loss += cls_loss.item()
+                 epoch_consistency_loss += consistency_loss.item() if isinstance(consistency_loss, torch.Tensor) else 0
+
+             # Calculate average losses
+             avg_loss = epoch_loss / len(train_loader)
+             avg_cls_loss = epoch_cls_loss / len(train_loader)
+             avg_consistency_loss = epoch_consistency_loss / len(train_loader)
+
+             print(f'Epoch {cur_epoch} - Loss: {avg_loss:.4f}, Cls Loss: {avg_cls_loss:.4f}, Consistency Loss: {avg_consistency_loss:.4f}')
+             print(f'Finished epoch {cur_epoch} training.')
+
+             if (cur_epoch % config.val_per_epoch == 0 and cur_epoch != 0) or cur_epoch == (config.max_epoch - 1):
+                 model.eval()
+                 correct = 0.0
+                 for images, labels in tqdm(test_loader):
+                     images, labels = images.cuda(), labels.cuda()
+                     with torch.no_grad():
+                         outputs = model(images)
+
+                     _, preds = outputs.max(1)
+                     correct += preds.eq(labels).sum()
+                 cur_acc = correct.float() / len(test_loader.dataset)
+                 print(f"Epoch: {cur_epoch}, Accuracy: {cur_acc}")
+
+                 if cur_acc > best_acc:
+                     best_acc = cur_acc
+                     best_epoch = cur_epoch
+                     torch.save(model.state_dict(), os.path.join(config.out_dir, 'best.pth'))
+
+         final_infos = {
+             "cifar100": {
+                 "means": {
+                     "best_acc": best_acc.item(),
+                     "epoch": best_epoch
+                 },
+                 "config": {
+                     "alpha": config.alpha,
+                     "beta": config.beta,
+                     "gamma": config.gamma,
+                     "memory_size": config.memory_size,
+                     "decay_rate": config.decay_rate,
+                     "consistency_weight": config.consistency_weight,
+                     "auxiliary_weight": config.auxiliary_weight,
+                     "use_adaptive_aug": config.use_adaptive_aug,
+                     "use_temporal_consistency": config.use_temporal_consistency
+                 }
+             }
+         }
+
+         with open(os.path.join(config.out_dir, "final_info.json"), "w") as f:
+             json.dump(final_infos, f)
+
+     except Exception as e:
+         print("Original error in subprocess:", flush=True)
+         traceback.print_exc(file=open(os.path.join(config.out_dir, "traceback.log"), "w"))
+         raise
examples/AutoCls2D_Cifar100/HARCNet/harcnet.py ADDED
@@ -0,0 +1,193 @@
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ import numpy as np
+ from scipy.stats import entropy
+
+
+ class AdaptiveAugmentation:
+     """
+     Implements adaptive data-driven augmentation for HARCNet.
+     Dynamically adjusts geometric and MixUp augmentations based on data distribution.
+     """
+     def __init__(self, alpha=0.5, beta=0.5, gamma=2.0):
+         """
+         Args:
+             alpha: Weight for variance component in geometric augmentation
+             beta: Weight for entropy component in geometric augmentation
+             gamma: Scaling factor for MixUp interpolation
+         """
+         self.alpha = alpha
+         self.beta = beta
+         self.gamma = gamma
+         self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+     def compute_variance(self, x):
+         """Compute variance across feature dimensions"""
+         # x shape: [B, C, H, W]
+         # Compute variance across channels for each spatial location
+         var = torch.var(x, dim=1, keepdim=True)  # [B, 1, H, W]
+         return var.mean(dim=[1, 2, 3])  # [B]
+
+     def compute_entropy(self, probs):
+         """Compute entropy of probability distributions"""
+         # probs shape: [B, C] where C is number of classes
+         # Ensure valid probability distribution
+         probs = torch.clamp(probs, min=1e-8, max=1.0)
+         log_probs = torch.log(probs)
+         entropy_val = -torch.sum(probs * log_probs, dim=1)  # [B]
+         return entropy_val
+
+     def get_geometric_strength(self, x, model=None, probs=None):
+         """
+         Compute geometric augmentation strength based on sample variance and entropy
+         S_g(x_i) = α·Var(x_i) + β·Entropy(x_i)
+         """
+         var = self.compute_variance(x)
+
+         # If model predictions are provided, use them for entropy calculation
+         if probs is None and model is not None:
+             with torch.no_grad():
+                 logits = model(x)
+                 probs = F.softmax(logits, dim=1)
+
+         if probs is not None:
+             ent = self.compute_entropy(probs)
+         else:
+             # Default entropy if no predictions available
+             ent = torch.ones_like(var)
+
+         # Normalize to [0, 1] range
+         var = (var - var.min()) / (var.max() - var.min() + 1e-8)
+         ent = (ent - ent.min()) / (ent.max() - ent.min() + 1e-8)
+
+         strength = self.alpha * var + self.beta * ent
+         return strength
+
+     def get_mixup_params(self, y, num_classes=100):
+         """
+         Generate MixUp parameters based on label entropy
+         λ ~ Beta(γ·Entropy(y), γ·Entropy(y))
+         """
+         # Convert labels to one-hot encoding
+         y_onehot = F.one_hot(y, num_classes=num_classes).float()
+
+         # Compute entropy of ground truth labels (across batch)
+         batch_entropy = self.compute_entropy(y_onehot.mean(dim=0, keepdim=True)).item()
+
+         # Generate mixup coefficient from Beta distribution
+         alpha = self.gamma * batch_entropy
+         alpha = max(0.1, min(alpha, 2.0))  # Bound alpha between 0.1 and 2.0
+
+         lam = np.random.beta(alpha, alpha)
+
+         # Generate random permutation for mixing
+         batch_size = y.size(0)
+         index = torch.randperm(batch_size).to(self.device)
+
+         return lam, index
+
+     def apply_mixup(self, x, y, num_classes=100):
+         """Apply MixUp augmentation with adaptive coefficient"""
+         lam, index = self.get_mixup_params(y, num_classes)
+         mixed_x = lam * x + (1 - lam) * x[index]
+         y_a, y_b = y, y[index]
+         return mixed_x, y_a, y_b, lam
+
+
+ class TemporalConsistencyRegularization:
+     """
+     Implements decayed temporal consistency regularization for HARCNet.
+     Reduces noise in pseudo-labels by incorporating past predictions.
+     """
+     def __init__(self, memory_size=5, decay_rate=2.0, consistency_weight=0.1):
+         """
+         Args:
+             memory_size: Number of past predictions to store (K)
+             decay_rate: Controls the decay of weights for past predictions (τ)
+             consistency_weight: Weight for consistency loss (λ_consistency)
+         """
+         self.memory_size = memory_size
+         self.decay_rate = decay_rate
+         self.consistency_weight = consistency_weight
+         self.prediction_history = {}  # Store past predictions for each sample
+
+     def compute_decay_weights(self):
+         """
+         Compute exponentially decaying weights
+         ω_k = e^(-k/τ) / Σ(e^(-k/τ))
+         """
+         weights = torch.exp(-torch.arange(1, self.memory_size + 1) / self.decay_rate)
+         return weights / weights.sum()
+
+     def update_history(self, indices, predictions):
+         """Update prediction history for each sample"""
+         for i, idx in enumerate(indices):
+             idx = idx.item()
+             if idx not in self.prediction_history:
+                 self.prediction_history[idx] = []
+
+             # Add current prediction to history
+             self.prediction_history[idx].append(predictions[i].detach())
+
+             # Keep only the most recent K predictions
+             if len(self.prediction_history[idx]) > self.memory_size:
+                 self.prediction_history[idx].pop(0)
+
+     def get_aggregated_predictions(self, indices):
+         """
+         Get aggregated predictions for each sample using decay weights
+         ỹ_i = Σ(ω_k · ŷ_i^(t-k))
+         """
+         weights = self.compute_decay_weights().to(indices.device)
+         aggregated_preds = []
+
+         for i, idx in enumerate(indices):
+             idx = idx.item()
+             if idx in self.prediction_history and len(self.prediction_history[idx]) > 0:
+                 # Get available history (might be less than memory_size)
+                 history = self.prediction_history[idx]
+                 history_len = len(history)
+
+                 if history_len > 0:
+                     # Use available weights
+                     available_weights = weights[-history_len:]
+                     available_weights = available_weights / available_weights.sum()
+
+                     # Compute weighted sum
+                     weighted_sum = torch.zeros_like(history[0])
+                     for j, pred in enumerate(history):
+                         weighted_sum += available_weights[j] * pred
+
+                     aggregated_preds.append(weighted_sum)
+                 else:
+                     # No history available, use zeros
+                     aggregated_preds.append(torch.zeros_like(history[0]))
+             else:
+                 # No history for this sample, return None
+                 aggregated_preds.append(None)
+
+         return aggregated_preds
+
+     def compute_consistency_loss(self, current_preds, indices):
+         """
+         Compute consistency loss between current and aggregated past predictions
+         L_consistency(x_i) = ||ŷ_i^(t) - Σ(ω_k · ŷ_i^(t-k))||^2_2
+         """
+         aggregated_preds = self.get_aggregated_predictions(indices)
+         loss = 0.0
+         valid_samples = 0
+
+         for i, agg_pred in enumerate(aggregated_preds):
+             if agg_pred is not None:
+                 # Compute MSE between current and aggregated predictions
+                 sample_loss = F.mse_loss(current_preds[i], agg_pred)
+                 loss += sample_loss
+                 valid_samples += 1
+
+         # Return average loss if there are valid samples
+         if valid_samples > 0:
+             return loss / valid_samples
+         else:
+             # Return zero loss if no valid samples
+             return torch.tensor(0.0).to(current_preds.device)
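
A minimal usage sketch for the two components above, assuming `harcnet.py` is on the import path; the tensors are random stand-ins for a real model and data (illustrative only, not part of the uploaded files):

```python
import torch
from harcnet import AdaptiveAugmentation, TemporalConsistencyRegularization

aug = AdaptiveAugmentation(alpha=0.6, beta=0.6, gamma=2.2)
tcr = TemporalConsistencyRegularization(memory_size=5, decay_rate=2.0,
                                        consistency_weight=0.05)

device = aug.device                               # cuda if available, else cpu
x = torch.randn(8, 3, 32, 32, device=device)      # dummy image batch
y = torch.randint(0, 100, (8,), device=device)    # dummy CIFAR-100 labels
idx = torch.arange(8, device=device)              # stable per-sample indices

mixed_x, y_a, y_b, lam = aug.apply_mixup(x, y, num_classes=100)
probs = torch.softmax(torch.randn(8, 100, device=device), dim=1)  # stand-in for model output
tcr.update_history(idx, probs)
loss = tcr.compute_consistency_loss(probs, idx)   # zero on the first step (history == current)
print(lam, loss.item())
```

The call order (`update_history`, then `compute_consistency_loss`) matches how the HARCNet `experiment.py` above uses these classes.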
examples/AutoCls2D_Cifar100/HARCNet/idea.json ADDED
@@ -0,0 +1,7 @@
+ {
+     "name": "HARCNet",
+     "title": "HARCNet: Hierarchical Adaptive Regularization and Consistency Network for Robust Image Classification",
+     "description": "HARCNet combines hierarchical adaptive augmentation with mathematically grounded regularization mechanisms inspired by human visual processing to improve robustness in image classification tasks. The method integrates (1) an adaptive augmentation mechanism that dynamically modulates geometric transformations based on data distribution, and (2) a decayed temporal consistency regularization framework underpinned by formal mathematical formulations, ensuring smoother pseudo-labeling and improved convergence. These components collaborate synergistically to achieve robust classification performance on CIFAR-100.",
+     "statement": "HARCNet introduces both an adaptive augmentation mechanism and a mathematically substantiated temporal consistency regularization framework with a clear focus on enhancing image classification. The novel aspects include (1) using dynamic modulation of MixUp and geometric augmentation strengths based on data distribution statistics, which optimally augments training data while preserving its complexity, and (2) a formal decayed temporal consistency regularization mechanism that stabilizes pseudo-labeling while mitigating stochastic noise via weighted past predictions. These innovations address critiques of unclear formulations and theoretical justifications, providing a cohesive and reproducibly implementable design significantly differentiated from existing methods.",
+     "method": "### Enhanced Method Description\n\n#### Key Contribution 1: Adaptive Data-Driven Augmentation\nHARCNet employs an adaptive augmentation mechanism that adjusts the intensity of geometric and MixUp augmentations dynamically based on data distribution statistics. Specifically, the augmentation strength is computed using the following:\n\n1. **Dynamic Geometric Transformation**:\n Let \\( S_{g} \\) represent the geometric augmentation strength, which is updated as follows:\n \n \\[\n S_{g}(x_i) = \\alpha \\cdot \\text{Var}(x_i) + \\beta \\cdot \\text{Entropy}(x_i)\n \\]\n \n where \\( \\text{Var}(x_i) \\) denotes the attribute variance of sample \\( x_i \\), \\( \\text{Entropy}(x_i) \\) captures its uncertainty (estimated using the model's softmax predictions), and hyperparameters \\( \\alpha \\) and \\( \\beta \\) control the weighting. Higher variance and uncertainty lead to stronger augmentations.\n\n2. **MixUp Modulation**:\n Augmentation based on MixUp interpolation is similarly orchestrated. The MixUp coefficient \\( \\lambda \\) is sampled from a Beta distribution modified with an adaptive coefficient:\n \n \\[\n \\lambda \\sim \\text{Beta}(\\gamma \\cdot \\text{Entropy}(y), \\gamma \\cdot \\text{Entropy}(y))\n \\]\n \n where \\( y \\) is the ground truth label distribution and \\( \\gamma \\) is a scaling factor that enhances augmentation for higher uncertainty samples.\n\n#### Key Contribution 2: Decayed Temporal Consistency Regularization\nThis component reduces noise in pseudo-labels by incorporating past predictions into the current learning time step. It is supported by a mathematical formulation for exponential decay:\n\n1. **Consistency Objective**:\n For each sample \\( x_i \\), the consistency loss is given by:\n \n \\[\n \\mathcal{L}_{consistency}(x_i) = \\left\\| \\hat{y}_i^{(t)} - \\sum_{k=1}^{K} \\omega_k \\hat{y}_i^{(t-k)} \\right\\|^2_2\n \\]\n \n where \\( \\hat{y}_i^{(t)} \\) is the current model prediction at iteration \\( t \\), \\( \\hat{y}_i^{(t-k)} \\) represents earlier predictions, \\( \\omega_k = \\frac{e^{-k/\\tau}}{\\sum_{k=1}^{K} e^{-k/\\tau}} \\) are exponentially decaying weights, and \\( \\tau \\) is a decay rate controlling the memory span.\n\n2. **Pseudo-Label Refinement**:\n The decayed aggregate prediction is used as a self-regularizing pseudo-label for semi-supervised learning. The aggregated pseudo-label \\( \\tilde{y}_i \\) is defined as:\n \n \\[\n \\tilde{y}_i = \\sum_{k=0}^{K} \\omega_k \\hat{y}_i^{(t-k)}\n \\]\n \n This encourages temporal consistency while reducing high-variance, noisy predictions.\n\n#### Integration Workflow\n1. **Adaptive Augmentation Phase**: Input images are preprocessed using dynamically tuned MixUp and geometric transformations based on their variance and entropy.\n2. **Prediction and Temporal Aggregation**: For each batch, the network evaluates predictions and refines pseudo-labels by aggregating past outputs weighted with the exponential decay mechanism.\n3. **Total Loss Optimization**: The total training loss integrates primary classification loss \\( \\mathcal{L}_{cls} \\), consistency regularization \\( \\mathcal{L}_{consistency} \\), and regularized auxiliary losses:\n \n \\[\n \\mathcal{L} = \\mathcal{L}_{cls} + \\lambda_{consistency} \\mathcal{L}_{consistency} + \\lambda_{auxiliary} \\mathcal{L}_{auxiliary}\n \\]\n\n4. **Optimizer Parameters**: We employ SGD with momentum (0.9) and weight decay (\\( 5 \\times 10^{-4} \\)). The step sizes for \\( \\lambda_{consistency} \\) and \\( \\lambda_{auxiliary} \\) are determined via grid search over the validation set.\n\n#### Experimentation and Validation\nThe framework is rigorously evaluated with ablation studies focusing on compatibility between augmentation, temporal consistency mechanisms, and auxiliary loss optimization. Performance metrics include classification accuracy, robustness against label noise, and consistency improvements. Benchmarks compare HARCNet to ResNet and Vision Transformer models on CIFAR-100, analyzing computational overhead and practical gain in accuracy. Overall, these results demonstrate significant improvements while addressing critiques of mathematical rigor, modular interaction, and reproducibility."
+ }
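
To make the decay weights in the method description concrete: with the committed settings K = 5 (`memory_size`) and τ = 2 (`decay_rate`), the normalized weights ω_k = e^(-k/τ) / Σ e^(-j/τ) come out as below. A small sketch mirroring `compute_decay_weights` in `harcnet.py` above (illustrative only):

```python
import torch

K, tau = 5, 2.0                               # memory_size and decay_rate from the config above
w = torch.exp(-torch.arange(1, K + 1) / tau)  # e^{-k/tau} for k = 1..K
w = w / w.sum()                               # normalize so the weights sum to 1
print(w)                                      # ~[0.4287, 0.2600, 0.1577, 0.0957, 0.0580]
```

The most recent prediction (k = 1) dominates, and the influence of older predictions falls off exponentially.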
examples/AutoCls2D_Cifar100/HARCNet/launcher.sh ADDED
@@ -0,0 +1,6 @@
+ python experiment.py \
+ --num_workers 4 \
+ --out_dir run_1 \
+ --in_channels 3 \
+ --data_root ./datasets/cifar100 \
+ --val_per_epoch 5
examples/AutoCls2D_Cifar100/HARCNet/res/best.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d6649698a63faa7a25ffba1a651055552d624d9d714e262cd8bbac56f9aca1b7
+ size 146262623
examples/AutoCls2D_Cifar100/HARCNet/res/final_info.json ADDED
@@ -0,0 +1 @@
+ {"cifar100": {"means": {"best_acc": 0.833299994468689, "epoch": 199}, "config": {"alpha": 0.6, "beta": 0.6, "gamma": 2.2, "memory_size": 5, "decay_rate": 2.0, "consistency_weight": 0.05, "auxiliary_weight": 0.05, "use_adaptive_aug": true, "use_temporal_consistency": true}}}
examples/AutoCls3D_ModelNet40/Baseline/data_transforms.py ADDED
@@ -0,0 +1,37 @@
+ import numpy as np
+
+
+ def random_point_dropout(batch_pc, max_dropout_ratio=0.875):
+     ''' batch_pc: BxNx3 '''
+     for b in range(batch_pc.shape[0]):
+         dropout_ratio = np.random.random() * max_dropout_ratio  # 0~0.875
+         drop_idx = np.where(np.random.random((batch_pc.shape[1])) <= dropout_ratio)[0]
+         if len(drop_idx) > 0:
+             batch_pc[b, drop_idx, :] = batch_pc[b, 0, :]  # set to the first point
+     return batch_pc
+
+ def random_scale_point_cloud(batch_data, scale_low=0.8, scale_high=1.25):
+     """ Randomly scale the point cloud. Scale is per point cloud.
+         Input:
+             BxNx3 array, original batch of point clouds
+         Return:
+             BxNx3 array, scaled batch of point clouds
+     """
+     B, N, C = batch_data.shape
+     scales = np.random.uniform(scale_low, scale_high, B)
+     for batch_index in range(B):
+         batch_data[batch_index, :, :] *= scales[batch_index]
+     return batch_data
+
+ def shift_point_cloud(batch_data, shift_range=0.1):
+     """ Randomly shift point cloud. Shift is per point cloud.
+         Input:
+             BxNx3 array, original batch of point clouds
+         Return:
+             BxNx3 array, shifted batch of point clouds
+     """
+     B, N, C = batch_data.shape
+     shifts = np.random.uniform(-shift_range, shift_range, (B, 3))
+     for batch_index in range(B):
+         batch_data[batch_index, :, :] += shifts[batch_index, :]
+     return batch_data
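
A minimal usage sketch for the three NumPy augmentations above (dummy batch; assumes `data_transforms.py` is importable; illustrative only):

```python
import numpy as np
from data_transforms import (random_point_dropout, random_scale_point_cloud,
                             shift_point_cloud)

batch = np.random.randn(4, 1024, 3).astype(np.float32)  # B x N x 3 dummy point clouds
batch = random_point_dropout(batch)        # collapse a random subset of points onto point 0
batch = random_scale_point_cloud(batch)    # per-cloud scale drawn from [0.8, 1.25]
batch = shift_point_cloud(batch)           # per-cloud shift drawn from [-0.1, 0.1] per axis
print(batch.shape)                         # (4, 1024, 3)
```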
examples/AutoCls3D_ModelNet40/Baseline/experiment.py ADDED
@@ -0,0 +1,430 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from tqdm import tqdm
3
+ import pickle
4
+ import argparse
5
+ import pathlib
6
+ import json
7
+ import time
8
+ import torch
9
+ import torch.nn as nn
10
+ import torch.nn.parallel
11
+ import torch.utils.data
12
+ import numpy as np
13
+ import torch.nn.functional as F
14
+ from torch.utils.data import Dataset, DataLoader
15
+ from metrics import ConfusionMatrix
16
+ import data_transforms
17
+ import argparse
18
+ import random
19
+ import traceback
20
+
21
+ """
22
+ Model
23
+ """
24
+ class STN3d(nn.Module):
25
+ def __init__(self, in_channels):
26
+ super(STN3d, self).__init__()
27
+ self.conv_layers = nn.Sequential(
28
+ nn.Conv1d(in_channels, 64, 1),
29
+ nn.BatchNorm1d(64),
30
+ nn.ReLU(inplace=True),
31
+ nn.Conv1d(64, 128, 1),
32
+ nn.BatchNorm1d(128),
33
+ nn.ReLU(inplace=True),
34
+ nn.Conv1d(128, 1024, 1),
35
+ nn.BatchNorm1d(1024),
36
+ nn.ReLU(inplace=True)
37
+ )
38
+ self.linear_layers = nn.Sequential(
39
+ nn.Linear(1024, 512),
40
+ nn.BatchNorm1d(512),
41
+ nn.ReLU(inplace=True),
42
+ nn.Linear(512, 256),
43
+ nn.BatchNorm1d(256),
44
+ nn.ReLU(inplace=True),
45
+ nn.Linear(256, 9)
46
+ )
47
+ self.iden = torch.from_numpy(np.array([1, 0, 0, 0, 1, 0, 0, 0, 1]).astype(np.float32)).reshape(1, 9)
48
+
49
+ def forward(self, x):
50
+ batchsize = x.size()[0]
51
+ x = self.conv_layers(x)
52
+ x = torch.max(x, 2, keepdim=True)[0]
53
+ x = x.view(-1, 1024)
54
+
55
+ x = self.linear_layers(x)
56
+ iden = self.iden.repeat(batchsize, 1).to(x.device)
57
+ x = x + iden
58
+ x = x.view(-1, 3, 3)
59
+ return x
60
+
61
+
62
+ class STNkd(nn.Module):
63
+ def __init__(self, k=64):
64
+ super(STNkd, self).__init__()
65
+ self.conv_layers = nn.Sequential(
66
+ nn.Conv1d(k, 64, 1),
67
+ nn.BatchNorm1d(64),
68
+ nn.ReLU(inplace=True),
69
+ nn.Conv1d(64, 128, 1),
70
+ nn.BatchNorm1d(128),
71
+ nn.ReLU(inplace=True),
72
+ nn.Conv1d(128, 1024, 1),
73
+ nn.BatchNorm1d(1024),
74
+ nn.ReLU(inplace=True)
75
+ )
76
+ self.linear_layers = nn.Sequential(
77
+ nn.Linear(1024, 512),
78
+ nn.BatchNorm1d(512),
79
+ nn.ReLU(inplace=True),
80
+ nn.Linear(512, 256),
81
+ nn.BatchNorm1d(256),
82
+ nn.ReLU(inplace=True),
83
+ nn.Linear(256, k * k)
84
+ )
85
+ self.k = k
86
+ self.iden = torch.from_numpy(np.eye(self.k).flatten().astype(np.float32)).reshape(1, self.k * self.k)
87
+
88
+ def forward(self, x):
89
+ batchsize = x.size()[0]
90
+ x = self.conv_layers(x)
91
+ x = torch.max(x, 2, keepdim=True)[0]
92
+ x = x.view(-1, 1024)
93
+ x = self.linear_layers(x)
94
+ iden = self.iden.repeat(batchsize, 1).to(x.device)
95
+ x = x + iden
96
+ x = x.view(-1, self.k, self.k)
97
+ return x
98
+
99
+
100
+ class PointNetEncoder(nn.Module):
101
+ def __init__(self, global_feat=True, feature_transform=False, in_channels=3):
102
+ super(PointNetEncoder, self).__init__()
103
+ self.stn = STN3d(in_channels)
104
+ self.conv_layer1 = nn.Sequential(
105
+ nn.Conv1d(in_channels, 64, 1),
106
+ nn.BatchNorm1d(64),
107
+ nn.ReLU(inplace=True),
108
+ nn.Conv1d(64, 64, 1),
109
+ nn.BatchNorm1d(64),
110
+ nn.ReLU(inplace=True)
111
+ )
112
+ self.conv_layer2 = nn.Sequential(
113
+ nn.Conv1d(64, 64, 1),
114
+ nn.BatchNorm1d(64),
115
+ nn.ReLU(inplace=True)
116
+ )
117
+ self.conv_layer3 = nn.Sequential(
118
+ nn.Conv1d(64, 128, 1),
119
+ nn.BatchNorm1d(128),
120
+ nn.ReLU(inplace=True)
121
+ )
122
+ self.conv_layer4 = nn.Sequential(
123
+ nn.Conv1d(128, 1024, 1),
124
+ nn.BatchNorm1d(1024)
125
+ )
126
+ self.global_feat = global_feat
127
+ self.feature_transform = feature_transform
128
+ if self.feature_transform:
129
+ self.fstn = STNkd(k=64)
130
+
131
+ def forward(self, x):
132
+ B, D, N = x.size()
133
+ trans = self.stn(x)
134
+ x = x.transpose(2, 1)
135
+ if D > 3:
136
+ feature = x[:, :, 3:]
137
+ x = x[:, :, :3]
138
+ x = torch.bmm(x, trans)
139
+ if D > 3:
140
+ x = torch.cat([x, feature], dim=2)
141
+ x = x.transpose(2, 1)
142
+ x = self.conv_layer1(x)
143
+
144
+ if self.feature_transform:
145
+ trans_feat = self.fstn(x)
146
+ x = x.transpose(2, 1)
147
+ x = torch.bmm(x, trans_feat)
148
+ x = x.transpose(2, 1)
149
+ else:
150
+ trans_feat = None
151
+
152
+ pointfeat = x
153
+ x = self.conv_layer2(x)
154
+ x = self.conv_layer3(x)
155
+ x = self.conv_layer4(x)
156
+ x = torch.max(x, 2, keepdim=True)[0]
157
+ x = x.view(-1, 1024)
158
+
159
+ # Construct graph and compute context-aware features
160
+ graph = construct_graph(x, args.k)
161
+ context_features = compute_context_aware_features(x, graph)
162
+ x = x + context_features
163
+
164
+ if self.global_feat:
165
+ return x, trans, trans_feat
166
+ else:
167
+ x = x.view(-1, 1024, 1).repeat(1, 1, N)
168
+ return torch.cat([x, pointfeat], 1), trans, trans_feat
169
+
170
+
171
+
172
+ def construct_graph(points, k):
173
+ """
174
+ Construct a dynamic graph where nodes represent points and edges capture semantic similarities.
175
+ """
176
+ # Compute pairwise distances
177
+ dist = torch.cdist(points, points)
178
+ # Get the top k neighbors
179
+ _, indices = torch.topk(dist, k, largest=False, dim=1)
180
+ return indices
181
+
182
+ def compute_context_aware_features(points, graph, normalization_method='mean'):
183
+ """
184
+ Compute context-aware feature adjustments using the constructed graph.
185
+ """
186
+ # Initialize context-aware features
187
+ context_features = torch.zeros_like(points)
188
+ for i in range(points.size(0)):
189
+ neighbors = graph[i]
190
+ if normalization_method == 'mean':
191
+ context_features[i] = points[neighbors].mean(dim=0)
192
+ elif normalization_method == 'max':
193
+ context_features[i] = points[neighbors].max(dim=0)[0]
194
+ elif normalization_method == 'min':
195
+ context_features[i] = points[neighbors].min(dim=0)[0]
196
+ elif normalization_method == 'std':
197
+ context_features[i] = points[neighbors].std(dim=0)
198
+ else:
199
+ raise ValueError("Unknown normalization method: {}".format(normalization_method))
200
+ return context_features
201
+
202
+ def feature_transform_reguliarzer(trans):
203
+ d = trans.size()[1]
204
+ I = torch.eye(d)[None, :, :]
205
+ if trans.is_cuda:
206
+ I = I.cuda()
207
+ loss = torch.mean(torch.norm(torch.bmm(trans, trans.transpose(2, 1)) - I, dim=(1, 2)))
208
+ return loss
209
+
210
+ class Model(nn.Module):
211
+ def __init__(self, in_channels=3, num_classes=40, scale=0.001):
212
+ super().__init__()
213
+ self.mat_diff_loss_scale = scale
214
+ self.backbone = PointNetEncoder(global_feat=True, feature_transform=True, in_channels=in_channels)
215
+ self.cls_head = nn.Sequential(
216
+ nn.Linear(1024, 512),
217
+ nn.BatchNorm1d(512),
218
+ nn.ReLU(inplace=True),
219
+ nn.Linear(512, 256),
220
+ nn.Dropout(p=0.4),
221
+ nn.BatchNorm1d(256),
222
+ nn.ReLU(inplace=True),
223
+ nn.Linear(256, num_classes)
224
+ )
225
+
226
+ def forward(self, x, gts):
227
+ x, trans, trans_feat = self.backbone(x)
228
+ x = self.cls_head(x)
229
+ x = F.log_softmax(x, dim=1)
230
+ loss = F.nll_loss(x, gts)
231
+ mat_diff_loss = feature_transform_reguliarzer(trans_feat)
232
+ total_loss = loss + mat_diff_loss * self.mat_diff_loss_scale
233
+ return total_loss, x
234
+
235
+
236
+ """
237
+ dataset and normalization
238
+ """
239
+ def pc_normalize(pc):
240
+ centroid = np.mean(pc, axis=0)
241
+ pc = pc - centroid
242
+ m = np.max(np.sqrt(np.sum(pc**2, axis=1)))
243
+ pc = pc / m
244
+ return pc
245
+
246
+
247
+ class ModelNetDataset(Dataset):
248
+ def __init__(self, data_root, num_category, num_points, split='train'):
249
+ self.root = data_root
250
+ self.npoints = num_points
251
+ self.uniform = True
252
+ self.use_normals = True
253
+ self.num_category = num_category
254
+
255
+ if self.num_category == 10:
256
+ self.catfile = os.path.join(self.root, 'modelnet10_shape_names.txt')
257
+ else:
258
+ self.catfile = os.path.join(self.root, 'modelnet40_shape_names.txt')
259
+
260
+ self.cat = [line.rstrip() for line in open(self.catfile)]
261
+ self.classes = dict(zip(self.cat, range(len(self.cat))))
262
+
263
+ shape_ids = {}
264
+ if self.num_category == 10:
265
+ shape_ids['train'] = [line.rstrip() for line in open(os.path.join(self.root, 'modelnet10_train.txt'))]
266
+ shape_ids['test'] = [line.rstrip() for line in open(os.path.join(self.root, 'modelnet10_test.txt'))]
267
+ else:
268
+ shape_ids['train'] = [line.rstrip() for line in open(os.path.join(self.root, 'modelnet40_train.txt'))]
269
+ shape_ids['test'] = [line.rstrip() for line in open(os.path.join(self.root, 'modelnet40_test.txt'))]
270
+
271
+ assert (split == 'train' or split == 'test')
272
+ shape_names = ['_'.join(x.split('_')[0:-1]) for x in shape_ids[split]]
273
+ self.datapath = [(shape_names[i], os.path.join(self.root, shape_names[i], shape_ids[split][i]) + '.txt') for i
274
+ in range(len(shape_ids[split]))]
275
+ print('The size of %s data is %d' % (split, len(self.datapath)))
276
+
277
+ if self.uniform:
278
+ self.data_path = os.path.join(data_root, 'modelnet%d_%s_%dpts_fps.dat' % (self.num_category, split, self.npoints))
279
+ else:
280
+ self.data_path = os.path.join(data_root, 'modelnet%d_%s_%dpts.dat' % (self.num_category, split, self.npoints))
281
+
282
+ print('Load processed data from %s...' % self.data_path)
283
+ with open(self.data_path, 'rb') as f:
284
+ self.list_of_points, self.list_of_labels = pickle.load(f)
285
+
286
+ def __len__(self):
287
+ return len(self.datapath)
288
+
289
+ def __getitem__(self, index):
290
+ point_set, label = self.list_of_points[index], self.list_of_labels[index]
291
+ point_set[:, 0:3] = pc_normalize(point_set[:, 0:3])
292
+ if not self.use_normals:
293
+ point_set = point_set[:, 0:3]
294
+ return point_set, label[0]
295
+
296
+
297
+ def seed_everything(seed=11):
298
+ random.seed(seed)
299
+ np.random.seed(seed)
300
+ torch.manual_seed(seed)
301
+ torch.cuda.manual_seed(seed)
302
+ torch.cuda.manual_seed_all(seed)
303
+ torch.backends.cudnn.deterministic = True
304
+ torch.backends.cudnn.benchmark = False
305
+
306
+
307
+ def main(args):
308
+
309
+ seed_everything(args.seed)
310
+
311
+ final_infos = {}
312
+ all_results = {}
313
+
314
+ pathlib.Path(args.out_dir).mkdir(parents=True, exist_ok=True)
315
+
316
+ datasets, dataloaders = {}, {}
317
+ for split in ['train', 'test']:
318
+ datasets[split] = ModelNetDataset(args.data_root, args.num_category, args.num_points, split)
319
+ dataloaders[split] = DataLoader(datasets[split], batch_size=args.batch_size, shuffle=(split == 'train'),
320
+ drop_last=(split == 'train'), num_workers=8)
321
+
322
+ model = Model(in_channels=args.in_channels).cuda()
323
+ optimizer = torch.optim.Adam(
324
+ model.parameters(), lr=args.learning_rate,
325
+ betas=(0.9, 0.999), eps=1e-8,
326
+ weight_decay=1e-4
327
+ )
328
+ scheduler = torch.optim.lr_scheduler.StepLR(
329
+ optimizer, step_size=20, gamma=0.7
330
+ )
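With step_size=20, gamma=0.7, and the default learning_rate of 1e-3, the schedule decays as

    \[
    \eta(e) = 10^{-3} \cdot 0.7^{\lfloor e/20 \rfloor},
    \]

so by the last of the 200 default epochs the learning rate is about \(10^{-3} \cdot 0.7^{9} \approx 4.0 \times 10^{-5}\).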
331
+ train_losses = []
332
+ print("Training model...")
333
+ model.train()
334
+ global_step = 0
335
+ cur_epoch = 0
336
+ best_oa = 0
337
+ best_acc = 0
+ best_epoch = 0
338
+
339
+ start_time = time.time()
340
+ for epoch in tqdm(range(args.max_epoch), desc='training'):
341
+ model.train()
342
+ cm = ConfusionMatrix(num_classes=len(datasets['train'].classes))
343
+ for points, target in tqdm(dataloaders['train'], desc=f'epoch {cur_epoch}/{args.max_epoch}'):
344
+ # data transforms
345
+ points = points.data.numpy()
346
+ points = data_transforms.random_point_dropout(points)
347
+ points[:, :, 0:3] = data_transforms.random_scale_point_cloud(points[:, :, 0:3])
348
+ points[:, :, 0:3] = data_transforms.shift_point_cloud(points[:, :, 0:3])
349
+ points = torch.from_numpy(points).transpose(2, 1).contiguous()
350
+
351
+ points, target = points.cuda(), target.long().cuda()
352
+
353
+ loss, logits = model(points, target)
354
+ loss.backward()
355
+
356
+ torch.nn.utils.clip_grad_norm_(model.parameters(), 1, norm_type=2)
357
+ optimizer.step()
358
+ model.zero_grad()
359
+
360
+
361
+ logs = {"loss": loss.detach().item()}
362
+ train_losses.append(loss.detach().item())
363
+ cm.update(logits.argmax(dim=1), target)
+ global_step += 1
364
+
365
+ scheduler.step()
366
+ end_time = time.time()
367
+ training_time = end_time - start_time
368
+ macc, overallacc, accs = cm.all_acc()
369
+ print(f"iter: {global_step}/{args.max_epoch*len(dataloaders['train'])}, \
370
+ train_macc: {macc}, train_oa: {overallacc}")
371
+
372
+ if (cur_epoch % args.val_per_epoch == 0 and cur_epoch != 0) or cur_epoch == (args.max_epoch - 1):
373
+ model.eval()
374
+ cm = ConfusionMatrix(num_classes=datasets['test'].num_category)
375
+ pbar = tqdm(enumerate(dataloaders['test']), total=len(dataloaders['test']))
+ with torch.no_grad():
+     for idx, (points, target) in pbar:
+         points, target = points.cuda(), target.long().cuda()
+         points = points.transpose(2, 1).contiguous()
+         loss, logits = model(points, target)
+         cm.update(logits.argmax(dim=1), target)
382
+
383
+ tp, count = cm.tp, cm.count
384
+ macc, overallacc, accs = cm.cal_acc(tp, count)
385
+ print(f"iter: {global_step}/{args.max_epoch*len(dataloaders['train'])}, \
386
+ val_macc: {macc}, val_oa: {overallacc}")
387
+
388
+ if overallacc > best_oa:
389
+ best_oa = overallacc
390
+ best_acc = macc
391
+ best_epoch = cur_epoch
392
+ torch.save(model.state_dict(), os.path.join(args.out_dir, 'best.pth'))
393
+ cur_epoch += 1
394
+
395
+ print(f"finish epoch {cur_epoch} training")
396
+
397
+ final_infos = {
398
+ "modelnet" + str(args.num_category):{
399
+ "means":{
400
+ "best_oa": best_oa,
401
+ "best_acc": best_acc,
402
+ "epoch": best_epoch
403
+ }
404
+ }
405
+ }
406
+ with open(os.path.join(args.out_dir, "final_info.json"), "w") as f:
407
+ json.dump(final_infos, f)
408
+
409
+ if __name__ == "__main__":
410
+
411
+ parser = argparse.ArgumentParser()
412
+ parser.add_argument("--batch_size", type=int, default=64)
413
+ parser.add_argument("--out_dir", type=str, default="run_0")
414
+ parser.add_argument("--in_channels", type=int, default=6)
415
+ parser.add_argument("--num_points", type=int, default=1024)
416
+ parser.add_argument("--num_category", type=int, choices=[10, 40], default=40)
417
+ parser.add_argument("--data_root", type=str, default='./datasets/modelnet40')
418
+ parser.add_argument("--learning_rate", type=float, default=1e-3)
419
+ parser.add_argument("--max_epoch", type=int, default=200)
420
+ parser.add_argument("--val_per_epoch", type=int, default=5)
421
+ parser.add_argument("--k", type=int, default=5, help="Number of neighbors for graph construction")
422
+ parser.add_argument("--seed", type=int, default=666)
423
+ args = parser.parse_args()
424
+
425
+ try:
426
+ main(args)
427
+ except Exception as e:
428
+ print("Original error in subprocess:", flush=True)
429
+ with open(os.path.join(args.out_dir, "traceback.log"), "w") as log_file:
+     traceback.print_exc(file=log_file)
430
+ raise
examples/AutoCls3D_ModelNet40/Baseline/final_info.json ADDED
@@ -0,0 +1,9 @@
1
+ {
2
+ "modelnet40":{
3
+ "means":{
4
+ "best_oa": 91.0,
5
+ "best_acc": 87.6,
6
+ "epoch": 120
7
+ }
8
+ }
9
+ }
examples/AutoCls3D_ModelNet40/Baseline/launcher.sh ADDED
@@ -0,0 +1,5 @@
1
+ python experiment.py \
2
+ --out_dir run_0 \
3
+ --data_root ./datasets/modelnet40 \
4
+ --max_epoch 200 \
5
+ --val_per_epoch 5
examples/AutoCls3D_ModelNet40/Baseline/metrics.py ADDED
@@ -0,0 +1,311 @@
1
+ from math import log10
2
+ import numpy as np
3
+ import torch
4
+ from sklearn.metrics import confusion_matrix
5
+ import logging
6
+
7
+
8
+ def PSNR(mse, peak=1.):
9
+ return 10 * log10((peak ** 2) / mse)
10
+
11
+
12
+ class SegMetric:
+     def __init__(self, values):
+         assert isinstance(values, dict)
+         self.miou = values.get('miou', None)
+         self.oa = values.get('oa', None)
+         self.acc = values.get('acc', None)
+
+     def better_than(self, other):
+         return self.acc > other.acc
+
+     def state_dict(self):
+         return {'acc': self.acc}
31
+
32
+
33
+ class AverageMeter(object):
34
+ """Computes and stores the average and current value"""
35
+ def __init__(self):
36
+ self.reset()
37
+
38
+ def reset(self):
39
+ self.val = 0
40
+ self.avg = 0
41
+ self.sum = 0
42
+ self.count = 0
43
+
44
+ def update(self, val, n=1):
45
+ self.val = val
46
+ self.sum += val * n
47
+ self.count += n
48
+ self.avg = self.sum / self.count
49
+
50
+
51
+ class ConfusionMatrix:
52
+ """Accumulate a confusion matrix for a classification task.
53
+ ignore_index only supports indices < 0 or > num_classes
54
+ """
55
+
56
+ def __init__(self, num_classes, ignore_index=None):
57
+ self.value = 0
58
+ self.num_classes = num_classes
59
+ self.virtual_num_classes = num_classes + 1 if ignore_index is not None else num_classes
60
+ self.ignore_index = ignore_index
61
+
62
+ @torch.no_grad()
63
+ def update(self, pred, true):
64
+ """Update the confusion matrix with the given predictions."""
65
+ true = true.flatten()
66
+ pred = pred.flatten()
67
+ if self.ignore_index is not None:
68
+ if (true == self.ignore_index).sum() > 0:
69
+ pred[true == self.ignore_index] = self.virtual_num_classes -1
70
+ true[true == self.ignore_index] = self.virtual_num_classes -1
71
+ unique_mapping = true.flatten() * self.virtual_num_classes + pred.flatten()
72
+ bins = torch.bincount(unique_mapping, minlength=self.virtual_num_classes**2)
73
+ self.value += bins.view(self.virtual_num_classes, self.virtual_num_classes)[:self.num_classes, :self.num_classes]
74
+
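The update packs each (true, pred) pair into a single bin id true * C + pred and histograms them; a small worked example (hypothetical inputs, 3 classes):

    import torch

    true = torch.tensor([0, 1, 2, 1])
    pred = torch.tensor([0, 2, 2, 1])
    idx = true * 3 + pred                             # [0, 5, 8, 4]
    cm = torch.bincount(idx, minlength=9).view(3, 3)  # rows=truth, cols=pred
    # tensor([[1, 0, 0],
    #         [0, 1, 1],
    #         [0, 0, 1]])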
75
+ def reset(self):
76
+ """Reset all accumulated values."""
77
+ self.value = 0
78
+
79
+ @property
80
+ def tp(self):
81
+ """Get the true positive samples per-class."""
82
+ return self.value.diag()
83
+
84
+ @property
85
+ def actual(self):
86
+ """Get the false negative samples per-class."""
87
+ return self.value.sum(dim=1)
88
+
89
+ @property
90
+ def predicted(self):
91
+ """Get the false negative samples per-class."""
92
+ return self.value.sum(dim=0)
93
+
94
+ @property
95
+ def fn(self):
96
+ """Get the false negative samples per-class."""
97
+ return self.actual - self.tp
98
+
99
+ @property
100
+ def fp(self):
101
+ """Get the false positive samples per-class."""
102
+ return self.predicted - self.tp
103
+
104
+ @property
105
+ def tn(self):
106
+ """Get the true negative samples per-class."""
107
+ actual = self.actual
108
+ predicted = self.predicted
109
+ return actual.sum() + self.tp - (actual + predicted)
110
+
111
+ @property
112
+ def count(self): # a.k.a. actual positive class
113
+ """Get the number of samples per-class."""
114
+ # return self.tp + self.fn
115
+ return self.value.sum(dim=1)
116
+
117
+ @property
118
+ def frequency(self):
119
+ """Get the per-class frequency."""
120
+ # we avoid dividing by zero using: max(denominator, 1)
121
+ # return self.count / self.total.clamp(min=1)
122
+ count = self.value.sum(dim=1)
123
+ return count / count.sum().clamp(min=1)
124
+
125
+ @property
126
+ def total(self):
127
+ """Get the total number of samples."""
128
+ return self.value.sum()
129
+
130
+ @property
131
+ def overall_accuracy(self):
132
+ return self.tp.sum() / self.total
133
+
134
+ @property
135
+ def union(self):
136
+ return self.value.sum(dim=0) + self.value.sum(dim=1) - self.value.diag()
137
+
138
+ def all_acc(self):
139
+ return self.cal_acc(self.tp, self.count)
140
+
141
+ @staticmethod
142
+ def cal_acc(tp, count):
143
+ acc_per_cls = tp / count.clamp(min=1) * 100
144
+ over_all_acc = tp.sum() / count.sum() * 100
145
+ macc = torch.mean(acc_per_cls) # class accuracy
146
+ return macc.item(), over_all_acc.item(), acc_per_cls.cpu().numpy()
147
+
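Mean class accuracy and overall accuracy diverge under class imbalance; a worked example with hypothetical counts:

    import torch

    tp = torch.tensor([5, 90])       # correct predictions per class
    count = torch.tensor([10, 90])   # samples per class
    macc, oa, accs = ConfusionMatrix.cal_acc(tp, count)
    # accs = [50., 100.] -> macc = 75.0, while oa = 95/100 * 100 = 95.0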
148
+ @staticmethod
149
+ def print_acc(accs):
150
+ out = '\n Class ' + ' Acc '
151
+ for i, values in enumerate(accs):
152
+ out += '\n' + str(i).rjust(8) + f'{values.item():.2f}'.rjust(8)
153
+ out += '\n' + '-' * 20
154
+ out += '\n' + ' Mean ' + f'{torch.mean(accs).item():.2f}'.rjust(8)
155
+ logging.info(out)
156
+
157
+ def all_metrics(self):
158
+ tp, fp, fn = self.tp, self.fp, self.fn,
159
+
160
+ iou_per_cls = tp / (tp + fp + fn).clamp(min=1) * 100
161
+ acc_per_cls = tp / self.count.clamp(min=1) * 100
162
+ over_all_acc = tp.sum() / self.total * 100
163
+
164
+ miou = torch.mean(iou_per_cls)
165
+ macc = torch.mean(acc_per_cls) # class accuracy
166
+ return miou.item(), macc.item(), over_all_acc.item(), iou_per_cls.cpu().numpy(), acc_per_cls.cpu().numpy()
167
+
168
+
169
+ def get_mious(tp, union, count):
170
+ iou_per_cls = (tp + 1e-10) / (union + 1e-10) * 100
171
+ acc_per_cls = (tp + 1e-10) / (count + 1e-10) * 100
172
+ over_all_acc = tp.sum() / count.sum() * 100
173
+
174
+ miou = torch.mean(iou_per_cls)
175
+ macc = torch.mean(acc_per_cls) # class accuracy
176
+ return miou.item(), macc.item(), over_all_acc.item(), iou_per_cls.cpu().numpy(), acc_per_cls.cpu().numpy()
177
+
178
+
179
+ def partnet_metrics(num_classes, num_parts, objects, preds, targets):
180
+ """
181
+
182
+ Args:
183
+ num_classes:
184
+ num_parts:
185
+ objects: [int]
186
+ preds:[(num_parts,num_points)]
187
+ targets: [(num_points)]
188
+
189
+ Returns:
190
+
191
+ """
192
+ shape_iou_tot = [0.0] * num_classes
193
+ shape_iou_cnt = [0] * num_classes
194
+ part_intersect = [np.zeros((num_parts[o_l]), dtype=np.float32) for o_l in range(num_classes)]
195
+ part_union = [np.zeros((num_parts[o_l]), dtype=np.float32) + 1e-6 for o_l in range(num_classes)]
196
+
197
+ for obj, cur_pred, cur_gt in zip(objects, preds, targets):
198
+ cur_num_parts = num_parts[obj]
199
+ cur_pred = np.argmax(cur_pred[1:, :], axis=0) + 1
200
+ cur_pred[cur_gt == 0] = 0
201
+ cur_shape_iou_tot = 0.0
202
+ cur_shape_iou_cnt = 0
203
+ for j in range(1, cur_num_parts):
204
+ cur_gt_mask = (cur_gt == j)
205
+ cur_pred_mask = (cur_pred == j)
206
+
207
+ has_gt = (np.sum(cur_gt_mask) > 0)
208
+ has_pred = (np.sum(cur_pred_mask) > 0)
209
+
210
+ if has_gt or has_pred:
211
+ intersect = np.sum(cur_gt_mask & cur_pred_mask)
212
+ union = np.sum(cur_gt_mask | cur_pred_mask)
213
+ iou = intersect / union
214
+
215
+ cur_shape_iou_tot += iou
216
+ cur_shape_iou_cnt += 1
217
+
218
+ part_intersect[obj][j] += intersect
219
+ part_union[obj][j] += union
220
+ if cur_shape_iou_cnt > 0:
221
+ cur_shape_miou = cur_shape_iou_tot / cur_shape_iou_cnt
222
+ shape_iou_tot[obj] += cur_shape_miou
223
+ shape_iou_cnt[obj] += 1
224
+
225
+ msIoU = [shape_iou_tot[o_l] / shape_iou_cnt[o_l] for o_l in range(num_classes)]
226
+ part_iou = [np.divide(part_intersect[o_l][1:], part_union[o_l][1:]) for o_l in range(num_classes)]
227
+ mpIoU = [np.mean(part_iou[o_l]) for o_l in range(num_classes)]
228
+
229
+ # Print instance mean
230
+ mmsIoU = np.mean(np.array(msIoU))
231
+ mmpIoU = np.mean(mpIoU)
232
+
233
+ return msIoU, mpIoU, mmsIoU, mmpIoU
234
+
235
+
236
+ def IoU_from_confusions(confusions):
237
+ """
238
+ Computes IoU from confusion matrices.
239
+ :param confusions: ([..., n_c, n_c] np.int32). Can be any dimension, the confusion matrices should be described by
240
+ the last axes. n_c = number of classes
241
+ :param ignore_unclassified: (bool). True if the first class should be ignored in the results (not used by this implementation)
242
+ :return: ([..., n_c] np.float32) IoU score
243
+ """
244
+
245
+ # Compute TP, FP, FN. This assumes that the second to last axis counts the truths (like the first axis of a
246
+ # confusion matrix), and that the last axis counts the predictions (like the second axis of a confusion matrix)
247
+ TP = np.diagonal(confusions, axis1=-2, axis2=-1)
248
+ TP_plus_FN = np.sum(confusions, axis=-1)
249
+ TP_plus_FP = np.sum(confusions, axis=-2)
250
+
251
+ # Compute IoU
252
+ IoU = TP / (TP_plus_FP + TP_plus_FN - TP + 1e-6)
253
+
254
+ # Compute miou with only the actual classes
255
+ mask = TP_plus_FN < 1e-3
256
+ counts = np.sum(1 - mask, axis=-1, keepdims=True)
257
+ miou = np.sum(IoU, axis=-1, keepdims=True) / (counts + 1e-6)
258
+
259
+ # If class is absent, place miou in place of 0 IoU to get the actual mean later
260
+ IoU += mask * miou
261
+
262
+ return IoU
263
+
264
+
265
+ def shapenetpart_metrics(num_classes, num_parts, objects, preds, targets, masks):
266
+ """
267
+ Args:
268
+ num_classes:
269
+ num_parts:
270
+ objects: [int]
271
+ preds:[(num_parts,num_points)]
272
+ targets: [(num_points)]
273
+ masks: [(num_points)]
274
+ """
275
+ total_correct = 0.0
276
+ total_seen = 0.0
277
+ Confs = []
278
+ for obj, cur_pred, cur_gt, cur_mask in zip(objects, preds, targets, masks):
279
+ obj = int(obj)
280
+ cur_num_parts = num_parts[obj]
281
+ cur_pred = np.argmax(cur_pred, axis=0)
282
+ cur_pred = cur_pred[cur_mask]
283
+ cur_gt = cur_gt[cur_mask]
284
+ correct = np.sum(cur_pred == cur_gt)
285
+ total_correct += correct
286
+ total_seen += cur_pred.shape[0]
287
+ parts = [j for j in range(cur_num_parts)]
288
+ Confs += [confusion_matrix(cur_gt, cur_pred, labels=parts)]
289
+
290
+ Confs = np.array(Confs)
291
+ obj_mious = []
292
+ objects = np.asarray(objects)
293
+ for l in range(num_classes):
294
+ obj_inds = np.where(objects == l)[0]
295
+ obj_confs = np.stack(Confs[obj_inds])
296
+ obj_IoUs = IoU_from_confusions(obj_confs)
297
+ obj_mious += [np.mean(obj_IoUs, axis=-1)]
298
+
299
+ objs_average = [np.mean(mious) for mious in obj_mious]
300
+ instance_average = np.mean(np.hstack(obj_mious))
301
+ class_average = np.mean(objs_average)
302
+ acc = total_correct / total_seen
303
+
304
+ print('Objs | Inst | Air Bag Cap Car Cha Ear Gui Kni Lam Lap Mot Mug Pis Roc Ska Tab')
305
+ print('-----|------|--------------------------------------------------------------------------------')
306
+
307
+ s = '{:4.1f} | {:4.1f} | '.format(100 * class_average, 100 * instance_average)
308
+ for Amiou in objs_average:
309
+ s += '{:4.1f} '.format(100 * Amiou)
310
+ print(s + '\n')
311
+ return acc, objs_average, class_average, instance_average
examples/AutoCls3D_ModelNet40/HIRE-Net/data_transforms.py ADDED
@@ -0,0 +1,37 @@
1
+ import numpy as np
2
+
3
+
4
+ def random_point_dropout(batch_pc, max_dropout_ratio=0.875):
5
+ ''' batch_pc: BxNx3 '''
6
+ for b in range(batch_pc.shape[0]):
7
+ dropout_ratio = np.random.random()*max_dropout_ratio # 0~0.875
8
+ drop_idx = np.where(np.random.random((batch_pc.shape[1]))<=dropout_ratio)[0]
9
+ if len(drop_idx)>0:
10
+ batch_pc[b,drop_idx,:] = batch_pc[b,0,:] # set to the first point
11
+ return batch_pc
12
+
13
+ def random_scale_point_cloud(batch_data, scale_low=0.8, scale_high=1.25):
14
+ """ Randomly scale the point cloud. Scale is per point cloud.
15
+ Input:
16
+ BxNx3 array, original batch of point clouds
17
+ Return:
18
+ BxNx3 array, scaled batch of point clouds
19
+ """
20
+ B, N, C = batch_data.shape
21
+ scales = np.random.uniform(scale_low, scale_high, B)
22
+ for batch_index in range(B):
23
+ batch_data[batch_index,:,:] *= scales[batch_index]
24
+ return batch_data
25
+
26
+ def shift_point_cloud(batch_data, shift_range=0.1):
27
+ """ Randomly shift point cloud. Shift is per point cloud.
28
+ Input:
29
+ BxNx3 array, original batch of point clouds
30
+ Return:
31
+ BxNx3 array, shifted batch of point clouds
32
+ """
33
+ B, N, C = batch_data.shape
34
+ shifts = np.random.uniform(-shift_range, shift_range, (B,3))
35
+ for batch_index in range(B):
36
+ batch_data[batch_index,:,:] += shifts[batch_index,:]
37
+ return batch_data
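Usage sketch (hypothetical shapes): the three augmentations compose in place on BxNx3 float arrays, matching how the training loop applies them:

    import numpy as np

    batch = np.random.rand(4, 1024, 3).astype(np.float32)
    batch = random_point_dropout(batch)
    batch[:, :, 0:3] = random_scale_point_cloud(batch[:, :, 0:3])
    batch[:, :, 0:3] = shift_point_cloud(batch[:, :, 0:3])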
examples/AutoCls3D_ModelNet40/HIRE-Net/experiment.py ADDED
@@ -0,0 +1,565 @@
1
+ import os
2
+ from tqdm import tqdm
3
+ import pickle
4
+ import argparse
5
+ import pathlib
6
+ import json
7
+ import time
8
+ import torch
9
+ import torch.nn as nn
10
+ import torch.nn.parallel
11
+ import torch.utils.data
12
+ import numpy as np
13
+ import torch.nn.functional as F
14
+ from torch.utils.data import Dataset, DataLoader
15
+ from metrics import ConfusionMatrix
16
+ import data_transforms
18
+ import random
19
+ import traceback
20
+
21
+ """
22
+ Model
23
+ """
24
+ class STN3d(nn.Module):
25
+ def __init__(self, in_channels):
26
+ super(STN3d, self).__init__()
27
+ self.conv_layers = nn.Sequential(
28
+ nn.Conv1d(in_channels, 64, 1),
29
+ nn.BatchNorm1d(64),
30
+ nn.ReLU(inplace=True),
31
+ nn.Conv1d(64, 128, 1),
32
+ nn.BatchNorm1d(128),
33
+ nn.ReLU(inplace=True),
34
+ nn.Conv1d(128, 1024, 1),
35
+ nn.BatchNorm1d(1024),
36
+ nn.ReLU(inplace=True)
37
+ )
38
+ self.linear_layers = nn.Sequential(
39
+ nn.Linear(1024, 512),
40
+ nn.BatchNorm1d(512),
41
+ nn.ReLU(inplace=True),
42
+ nn.Linear(512, 256),
43
+ nn.BatchNorm1d(256),
44
+ nn.ReLU(inplace=True),
45
+ nn.Linear(256, 9)
46
+ )
47
+ self.iden = torch.from_numpy(np.array([1, 0, 0, 0, 1, 0, 0, 0, 1]).astype(np.float32)).reshape(1, 9)
48
+
49
+ def forward(self, x):
50
+ batchsize = x.size()[0]
51
+ x = self.conv_layers(x)
52
+ x = torch.max(x, 2, keepdim=True)[0]
53
+ x = x.view(-1, 1024)
54
+
55
+ x = self.linear_layers(x)
56
+ iden = self.iden.repeat(batchsize, 1).to(x.device)
57
+ x = x + iden
58
+ x = x.view(-1, 3, 3)
59
+ return x
60
+
61
+
62
+ class STNkd(nn.Module):
63
+ def __init__(self, k=64):
64
+ super(STNkd, self).__init__()
65
+ self.conv_layers = nn.Sequential(
66
+ nn.Conv1d(k, 64, 1),
67
+ nn.BatchNorm1d(64),
68
+ nn.ReLU(inplace=True),
69
+ nn.Conv1d(64, 128, 1),
70
+ nn.BatchNorm1d(128),
71
+ nn.ReLU(inplace=True),
72
+ nn.Conv1d(128, 1024, 1),
73
+ nn.BatchNorm1d(1024),
74
+ nn.ReLU(inplace=True)
75
+ )
76
+ self.linear_layers = nn.Sequential(
77
+ nn.Linear(1024, 512),
78
+ nn.BatchNorm1d(512),
79
+ nn.ReLU(inplace=True),
80
+ nn.Linear(512, 256),
81
+ nn.BatchNorm1d(256),
82
+ nn.ReLU(inplace=True),
83
+ nn.Linear(256, k * k)
84
+ )
85
+ self.k = k
86
+ self.iden = torch.from_numpy(np.eye(self.k).flatten().astype(np.float32)).reshape(1, self.k * self.k)
87
+
88
+ def forward(self, x):
89
+ batchsize = x.size()[0]
90
+ x = self.conv_layers(x)
91
+ x = torch.max(x, 2, keepdim=True)[0]
92
+ x = x.view(-1, 1024)
93
+ x = self.linear_layers(x)
94
+ iden = self.iden.repeat(batchsize, 1).to(x.device)
95
+ x = x + iden
96
+ x = x.view(-1, self.k, self.k)
97
+ return x
98
+
99
+
100
+ class EnhancedSTN(nn.Module):
101
+ """
102
+ Enhanced Spatial Transformer Network with improved rotation equivariance.
103
+ """
104
+ def __init__(self, in_channels):
105
+ super(EnhancedSTN, self).__init__()
106
+ self.conv_layers = nn.Sequential(
107
+ nn.Conv1d(in_channels, 64, 1),
108
+ nn.BatchNorm1d(64),
109
+ nn.ReLU(inplace=True),
110
+ nn.Conv1d(64, 128, 1),
111
+ nn.BatchNorm1d(128),
112
+ nn.ReLU(inplace=True),
113
+ nn.Conv1d(128, 1024, 1),
114
+ nn.BatchNorm1d(1024),
115
+ nn.ReLU(inplace=True)
116
+ )
117
+ self.linear_layers = nn.Sequential(
118
+ nn.Linear(1024, 512),
119
+ nn.BatchNorm1d(512),
120
+ nn.ReLU(inplace=True),
121
+ nn.Linear(512, 256),
122
+ nn.BatchNorm1d(256),
123
+ nn.ReLU(inplace=True),
124
+ nn.Linear(256, 9)
125
+ )
126
+ self.iden = torch.from_numpy(np.array([1, 0, 0, 0, 1, 0, 0, 0, 1]).astype(np.float32)).reshape(1, 9)
127
+
128
+ # Orthogonality regularization weight
129
+ self.ortho_weight = 0.01
130
+
131
+ def forward(self, x):
132
+ batchsize = x.size()[0]
133
+ x = self.conv_layers(x)
134
+ x = torch.max(x, 2, keepdim=True)[0]
135
+ x = x.view(-1, 1024)
136
+
137
+ x = self.linear_layers(x)
138
+ iden = self.iden.repeat(batchsize, 1).to(x.device)
139
+ x = x + iden
140
+ x = x.view(-1, 3, 3)
141
+
142
+ # Apply soft orthogonality constraint to ensure rotation matrix properties
143
+ # This helps maintain rotation equivariance
144
+ ortho_loss = torch.mean(torch.norm(
145
+ torch.bmm(x, x.transpose(2, 1)) - torch.eye(3, device=x.device).unsqueeze(0), dim=(1, 2)
146
+ ))
147
+
148
+ return x, self.ortho_weight * ortho_loss
149
+
150
+ class PointNetEncoder(nn.Module):
151
+ def __init__(self, global_feat=True, feature_transform=False, in_channels=3, num_alignments=2):
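+     # NOTE: num_alignments is part of the RE-MA interface described in idea.json,
+     # but this single-alignment implementation does not use it yet.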
152
+ super(PointNetEncoder, self).__init__()
153
+
154
+ self.stn = EnhancedSTN(in_channels)
155
+
156
+
157
+ self.conv_layer1 = nn.Sequential(
158
+ nn.Conv1d(in_channels, 64, 1),
159
+ nn.BatchNorm1d(64),
160
+ nn.ReLU(inplace=True),
161
+ nn.Conv1d(64, 64, 1),
162
+ nn.BatchNorm1d(64),
163
+ nn.ReLU(inplace=True)
164
+ )
165
+ self.conv_layer2 = nn.Sequential(
166
+ nn.Conv1d(64, 64, 1),
167
+ nn.BatchNorm1d(64),
168
+ nn.ReLU(inplace=True)
169
+ )
170
+ self.conv_layer3 = nn.Sequential(
171
+ nn.Conv1d(64, 128, 1),
172
+ nn.BatchNorm1d(128),
173
+ nn.ReLU(inplace=True)
174
+ )
175
+ self.conv_layer4 = nn.Sequential(
176
+ nn.Conv1d(128, 1024, 1),
177
+ nn.BatchNorm1d(1024)
178
+ )
179
+ self.global_feat = global_feat
180
+ self.feature_transform = feature_transform
181
+ if self.feature_transform:
182
+ self.fstn = STNkd(k=64)
183
+
184
+
185
+ self.ortho_loss = 0
186
+
187
+ def forward(self, x):
188
+ B, D, N = x.size()
189
+
190
+ trans, ortho_loss = self.stn(x)
191
+ self.ortho_loss = ortho_loss
192
+
193
+ x_aligned = x.transpose(2, 1)
194
+ if D > 3:
195
+ feature = x_aligned[:, :, 3:]
196
+ coords = x_aligned[:, :, :3]
197
+ coords = torch.bmm(coords, trans)
198
+ x_aligned = torch.cat([coords, feature], dim=2)
199
+ else:
200
+ x_aligned = torch.bmm(x_aligned, trans)
201
+ x_aligned = x_aligned.transpose(2, 1)
202
+
203
+
204
+ x = self.conv_layer1(x_aligned)
205
+
206
+ if self.feature_transform:
207
+ trans_feat = self.fstn(x)
208
+ x = x.transpose(2, 1)
209
+ x = torch.bmm(x, trans_feat)
210
+ x = x.transpose(2, 1)
211
+ else:
212
+ trans_feat = None
213
+
214
+ pointfeat = x
215
+ x = self.conv_layer2(x)
216
+ x = self.conv_layer3(x)
217
+ x = self.conv_layer4(x)
218
+ x = torch.max(x, 2, keepdim=True)[0]
219
+ x = x.view(-1, 1024)
220
+
221
+ graph = construct_graph(x, args.k)
222
+ context_features = compute_context_aware_features(x, graph)
223
+ x = x + context_features
224
+
225
+ if self.global_feat:
226
+ return x, trans, trans_feat
227
+ else:
228
+ x = x.view(-1, 1024, 1).repeat(1, 1, N)
229
+ return torch.cat([x, pointfeat], 1), trans, trans_feat
230
+
231
+
232
+
233
+ def construct_graph(points, k):
234
+ """
235
+ Construct a dynamic graph where nodes represent points and edges capture semantic similarities.
236
+ """
237
+ # Compute pairwise distances
238
+ dist = torch.cdist(points, points)
239
+ # Get the top k neighbors
240
+ _, indices = torch.topk(dist, k, largest=False, dim=1)
241
+ return indices
242
+
243
+ def compute_attention_weights(points, graph, epsilon=0.01):
244
+ """
245
+ Compute attention weights with energy-based normalization for numerical stability.
246
+ Improved implementation with better numerical stability and efficiency.
247
+
248
+ Args:
249
+ points: Input feature points [N, C]
+ graph: Neighborhood indices [N, k]
251
+ epsilon: Regularization parameter for bounded energy constraints
252
+
253
+ Returns:
254
+ Attention weights that satisfy bounded energy constraints
255
+ """
256
+ num_points = points.shape[0]
257
+ k = graph.shape[1]
258
+ attention_weights = torch.zeros(num_points, k, device=points.device)
259
+
260
+ for i in range(num_points):
261
+ neighbors = graph[i]
262
+
263
+ center_feat = points[i].unsqueeze(0) # [1, C]
264
+ neighbor_feats = points[neighbors] # [k, C]
265
+
266
+ center_norm = torch.norm(center_feat, dim=1, keepdim=True)
267
+ neighbor_norms = torch.norm(neighbor_feats, dim=1, keepdim=True)
268
+
269
+ center_norm = torch.clamp(center_norm, min=1e-8)
270
+ neighbor_norms = torch.clamp(neighbor_norms, min=1e-8)
271
+
272
+ center_feat_norm = center_feat / center_norm
273
+ neighbor_feats_norm = neighbor_feats / neighbor_norms
274
+
275
+ similarity = torch.sum(center_feat_norm * neighbor_feats_norm, dim=1)
276
+
277
+ weights = torch.exp(similarity)
278
+
279
+ norm_const = torch.sum(weights) + 1e-8
280
+ weights = weights / norm_const
281
+
282
+ sq_sum = torch.sum(weights * weights)
283
+ if sq_sum > epsilon:
284
+ scale_factor = torch.sqrt(epsilon / sq_sum)
285
+ weights = weights * scale_factor
286
+
287
+ attention_weights[i, :len(neighbors)] = weights
288
+
289
+ return attention_weights
290
+
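A minimal vectorized sketch of the same weighting (hypothetical helper; assumed equivalent to the per-row loop above, including the cosine similarity, softmax normalization, and bounded-energy rescaling):

    import torch
    import torch.nn.functional as F

    def compute_attention_weights_vectorized(points, graph, epsilon=0.01):
        feats = F.normalize(points, dim=1, eps=1e-8)        # [N, C] unit rows
        sim = (feats[graph] * feats.unsqueeze(1)).sum(-1)   # [N, k] cosine sims
        w = torch.softmax(sim, dim=1)                       # exp / row-sum
        sq = (w * w).sum(dim=1, keepdim=True)               # energy per row
        scale = torch.where(sq > epsilon, (epsilon / sq).sqrt(), torch.ones_like(sq))
        return w * scale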
291
+ def compute_context_aware_features(points, graph):
292
+ """
293
+ Compute context-aware feature adjustments using the constructed graph.
294
+ Enhanced with edge-aware attention pooling (EEGA) and improved stability.
295
+ """
296
+ # Calculate weighted edge features
297
+ context_features = torch.zeros_like(points)
298
+
299
+ # Compute attention weights with energy constraints
300
+ attention_weights = compute_attention_weights(points, graph, epsilon=args.epsilon)
301
+
302
+ # Calculate weighted edge features
303
+ for i in range(points.size(0)):
304
+ neighbors = graph[i]
305
+ weights = attention_weights[i, :len(neighbors)].unsqueeze(1)
306
+
307
+ # Calculate weighted edge features (φ_local(p_j) - φ_local(p_i))
308
+ # Using hybrid method: consider both differences and original features
309
+ edge_features = points[neighbors] - points[i].unsqueeze(0)
310
+ neighbor_features = points[neighbors]
311
+
312
+ # Weight edge features and neighbor features
313
+ weighted_edges = edge_features * weights * 0.5
314
+ weighted_neighbors = neighbor_features * weights * 0.5
315
+
316
+ # Aggregate features: combine edge differences and neighbor information
317
+ context_features[i] = torch.sum(weighted_edges, dim=0) + torch.sum(weighted_neighbors, dim=0)
318
+
319
+ return context_features
320
+
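Restated, for each global feature vector \(\phi_i\) the hybrid aggregation computes

    \[
    \mathbf{E}_i = \frac{1}{2} \sum_{j \in \mathcal{N}(i)} \alpha_{ij} (\phi_j - \phi_i)
                 + \frac{1}{2} \sum_{j \in \mathcal{N}(i)} \alpha_{ij} \phi_j,
    \]

which matches the EEGA edge-feature definition in idea.json up to the added neighbor term.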
321
+ def feature_transform_regularizer(trans):
322
+ d = trans.size()[1]
323
+ I = torch.eye(d)[None, :, :]
324
+ if trans.is_cuda:
325
+ I = I.cuda()
326
+ loss = torch.mean(torch.norm(torch.bmm(trans, trans.transpose(2, 1)) - I, dim=(1, 2)))
327
+ return loss
328
+
329
+
330
+ class Model(nn.Module):
331
+ def __init__(self, in_channels=3, num_classes=40, scale=0.001, num_alignments=2):
332
+ super().__init__()
333
+ self.mat_diff_loss_scale = scale
334
+ self.in_channels = in_channels
335
+ self.backbone = PointNetEncoder(
336
+ global_feat=True,
337
+ feature_transform=True,
338
+ in_channels=in_channels,
339
+ num_alignments=num_alignments
340
+ )
341
+
342
+ self.cls_head = nn.Sequential(
343
+ nn.Linear(1024, 512),
344
+ nn.BatchNorm1d(512),
345
+ nn.ReLU(inplace=True),
346
+ nn.Linear(512, 256),
347
+ nn.Dropout(p=0.4),
348
+ nn.BatchNorm1d(256),
349
+ nn.ReLU(inplace=True),
350
+ nn.Linear(256, num_classes)
351
+ )
352
+
353
+ def forward(self, x, gts):
354
+
355
+ global_features, trans, trans_feat = self.backbone(x)
356
+
357
+ x = self.cls_head(global_features)
358
+ x = F.log_softmax(x, dim=1)
359
+
360
+ loss = F.nll_loss(x, gts)
361
+ mat_diff_loss = feature_transform_regularizer(trans_feat)
362
+ ortho_loss = self.backbone.ortho_loss
363
+
364
+ total_loss = loss + mat_diff_loss * self.mat_diff_loss_scale + ortho_loss
365
+
366
+ return total_loss, x
367
+
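For reference, the objective assembled here (with the defaults scale=0.001 and the EnhancedSTN ortho_weight=0.01 already folded into ortho_loss) is

    \[
    \mathcal{L} = \mathcal{L}_{NLL} + 0.001\,\mathcal{L}_{mat} + 0.01\,\mathcal{L}_{ortho}.
    \]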
368
+
369
+ """
370
+ dataset and normalization
371
+ """
372
+ def pc_normalize(pc):
373
+ centroid = np.mean(pc, axis=0)
374
+ pc = pc - centroid
375
+ m = np.max(np.sqrt(np.sum(pc**2, axis=1)))
376
+ pc = pc / m
377
+ return pc
378
+
379
+
380
+ class ModelNetDataset(Dataset):
381
+ def __init__(self, data_root, num_category, num_points, split='train'):
382
+ self.root = data_root
383
+ self.npoints = num_points
384
+ self.uniform = True
385
+ self.use_normals = True
386
+ self.num_category = num_category
387
+
388
+ if self.num_category == 10:
389
+ self.catfile = os.path.join(self.root, 'modelnet10_shape_names.txt')
390
+ else:
391
+ self.catfile = os.path.join(self.root, 'modelnet40_shape_names.txt')
392
+
393
+ self.cat = [line.rstrip() for line in open(self.catfile)]
394
+ self.classes = dict(zip(self.cat, range(len(self.cat))))
395
+
396
+ shape_ids = {}
397
+ if self.num_category == 10:
398
+ shape_ids['train'] = [line.rstrip() for line in open(os.path.join(self.root, 'modelnet10_train.txt'))]
399
+ shape_ids['test'] = [line.rstrip() for line in open(os.path.join(self.root, 'modelnet10_test.txt'))]
400
+ else:
401
+ shape_ids['train'] = [line.rstrip() for line in open(os.path.join(self.root, 'modelnet40_train.txt'))]
402
+ shape_ids['test'] = [line.rstrip() for line in open(os.path.join(self.root, 'modelnet40_test.txt'))]
403
+
404
+ assert (split == 'train' or split == 'test')
405
+ shape_names = ['_'.join(x.split('_')[0:-1]) for x in shape_ids[split]]
406
+ self.datapath = [(shape_names[i], os.path.join(self.root, shape_names[i], shape_ids[split][i]) + '.txt') for i
407
+ in range(len(shape_ids[split]))]
408
+ print('The size of %s data is %d' % (split, len(self.datapath)))
409
+
410
+ if self.uniform:
411
+ self.data_path = os.path.join(data_root, 'modelnet%d_%s_%dpts_fps.dat' % (self.num_category, split, self.npoints))
412
+ else:
413
+ self.data_path = os.path.join(data_root, 'modelnet%d_%s_%dpts.dat' % (self.num_category, split, self.npoints))
414
+
415
+ print('Load processed data from %s...' % self.data_path)
416
+ with open(self.data_path, 'rb') as f:
417
+ self.list_of_points, self.list_of_labels = pickle.load(f)
418
+
419
+ def __len__(self):
420
+ return len(self.datapath)
421
+
422
+ def __getitem__(self, index):
423
+ point_set, label = self.list_of_points[index], self.list_of_labels[index]
424
+ point_set[:, 0:3] = pc_normalize(point_set[:, 0:3])
425
+ if not self.use_normals:
426
+ point_set = point_set[:, 0:3]
427
+ return point_set, label[0]
428
+
429
+
430
+ def seed_everything(seed=11):
431
+ random.seed(seed)
432
+ np.random.seed(seed)
433
+ torch.manual_seed(seed)
434
+ torch.cuda.manual_seed(seed)
435
+ torch.cuda.manual_seed_all(seed)
436
+ torch.backends.cudnn.deterministic = True
437
+ torch.backends.cudnn.benchmark = False
438
+
439
+
440
+ def main(args):
441
+
442
+ seed_everything(args.seed)
443
+
444
+ final_infos = {}
445
+ all_results = {}
446
+
447
+ pathlib.Path(args.out_dir).mkdir(parents=True, exist_ok=True)
448
+
449
+ datasets, dataloaders = {}, {}
450
+ for split in ['train', 'test']:
451
+ datasets[split] = ModelNetDataset(args.data_root, args.num_category, args.num_points, split)
452
+ dataloaders[split] = DataLoader(datasets[split], batch_size=args.batch_size, shuffle=(split == 'train'),
453
+ drop_last=(split == 'train'), num_workers=8)
454
+
455
+ model = Model(in_channels=args.in_channels, num_alignments=args.num_alignments).cuda()
456
+ optimizer = torch.optim.Adam(
457
+ model.parameters(), lr=args.learning_rate,
458
+ betas=(0.9, 0.999), eps=1e-8,
459
+ weight_decay=1e-4
460
+ )
461
+ scheduler = torch.optim.lr_scheduler.StepLR(
462
+ optimizer, step_size=20, gamma=0.7
463
+ )
464
+ train_losses = []
465
+ print("Training model...")
466
+ model.train()
467
+ global_step = 0
468
+ cur_epoch = 0
469
+ best_oa = 0
470
+ best_acc = 0
+ best_epoch = 0
471
+
472
+ start_time = time.time()
473
+ for epoch in tqdm(range(args.max_epoch), desc='training'):
474
+ model.train()
475
+ cm = ConfusionMatrix(num_classes=len(datasets['train'].classes))
476
+ for points, target in tqdm(dataloaders['train'], desc=f'epoch {cur_epoch}/{args.max_epoch}'):
477
+ # data transforms
478
+ points = points.data.numpy()
479
+ points = data_transforms.random_point_dropout(points)
480
+ points[:, :, 0:3] = data_transforms.random_scale_point_cloud(points[:, :, 0:3])
481
+ points[:, :, 0:3] = data_transforms.shift_point_cloud(points[:, :, 0:3])
482
+ points = torch.from_numpy(points).transpose(2, 1).contiguous()
483
+
484
+ points, target = points.cuda(), target.long().cuda()
485
+
486
+ loss, logits = model(points, target)
487
+ loss.backward()
488
+
489
+ torch.nn.utils.clip_grad_norm_(model.parameters(), 1, norm_type=2)
490
+ optimizer.step()
491
+ model.zero_grad()
492
+
493
+
494
+ logs = {"loss": loss.detach().item()}
495
+ train_losses.append(loss.detach().item())
496
+ cm.update(logits.argmax(dim=1), target)
+ global_step += 1
497
+
498
+ scheduler.step()
499
+ end_time = time.time()
500
+ training_time = end_time - start_time
501
+ macc, overallacc, accs = cm.all_acc()
502
+ print(f"iter: {global_step}/{args.max_epoch*len(dataloaders['train'])}, \
503
+ train_macc: {macc}, train_oa: {overallacc}")
504
+
505
+ if (cur_epoch % args.val_per_epoch == 0 and cur_epoch != 0) or cur_epoch == (args.max_epoch - 1):
506
+ model.eval()
507
+ cm = ConfusionMatrix(num_classes=datasets['test'].num_category)
508
+ pbar = tqdm(enumerate(dataloaders['test']), total=len(dataloaders['test']))
+ with torch.no_grad():
+     for idx, (points, target) in pbar:
+         points, target = points.cuda(), target.long().cuda()
+         points = points.transpose(2, 1).contiguous()
+         loss, logits = model(points, target)
+         cm.update(logits.argmax(dim=1), target)
515
+
516
+ tp, count = cm.tp, cm.count
517
+ macc, overallacc, accs = cm.cal_acc(tp, count)
518
+ print(f"iter: {global_step}/{args.max_epoch*len(dataloaders['train'])}, \
519
+ val_macc: {macc}, val_oa: {overallacc}")
520
+
521
+ if overallacc > best_oa:
522
+ best_oa = overallacc
523
+ best_acc = macc
524
+ best_epoch = cur_epoch
525
+ torch.save(model.state_dict(), os.path.join(args.out_dir, 'best.pth'))
526
+ cur_epoch += 1
527
+
528
+ print(f"finish epoch {cur_epoch} training")
529
+
530
+ final_infos = {
531
+ "modelnet" + str(args.num_category):{
532
+ "means":{
533
+ "best_oa": best_oa,
534
+ "best_acc": best_acc,
535
+ "epoch": best_epoch
536
+ }
537
+ }
538
+ }
539
+ with open(os.path.join(args.out_dir, "final_info.json"), "w") as f:
540
+ json.dump(final_infos, f)
541
+
542
+ if __name__ == "__main__":
543
+
544
+ parser = argparse.ArgumentParser()
545
+ parser.add_argument("--batch_size", type=int, default=64)
546
+ parser.add_argument("--out_dir", type=str, default="run_0")
547
+ parser.add_argument("--in_channels", type=int, default=6)
548
+ parser.add_argument("--num_points", type=int, default=1024)
549
+ parser.add_argument("--num_category", type=int, choices=[10, 40], default=40)
550
+ parser.add_argument("--data_root", type=str, default='./datasets/modelnet40')
551
+ parser.add_argument("--learning_rate", type=float, default=1e-3)
552
+ parser.add_argument("--max_epoch", type=int, default=200)
553
+ parser.add_argument("--val_per_epoch", type=int, default=5)
554
+ parser.add_argument("--k", type=int, default=16, help="Number of neighbors for graph construction")
555
+ parser.add_argument("--num_alignments", type=int, default=2, help="Number of rotational alignments for RE-MA")
556
+ parser.add_argument("--epsilon", type=float, default=0.05, help="Regularization parameter for attention weights")
557
+ parser.add_argument("--seed", type=int, default=666)
558
+ args = parser.parse_args()
559
+
560
+ try:
561
+ main(args)
562
+ except Exception as e:
563
+ print("Original error in subprocess:", flush=True)
564
+ with open(os.path.join(args.out_dir, "traceback.log"), "w") as log_file:
+     traceback.print_exc(file=log_file)
565
+ raise
examples/AutoCls3D_ModelNet40/HIRE-Net/idea.json ADDED
@@ -0,0 +1,7 @@
1
+ {
2
+ "name": "HIRE-Net",
3
+ "title": "Hierarchical Rotation-Equivariant Network with Efficient Edge-Aware Integration for 3D Point Cloud Classification",
4
+ "description": "HIRE-Net is a novel framework designed to enhance 3D point cloud classification through improved mathematical consistency and computational efficiency in rotation-equivariant and noise-resilient mechanisms. It introduces a flexible hierarchical design by incorporating (1) multi-alignments rotation-equivariant convolutions for robust local geometric encoding, and (2) an efficient edge-aware global embedding with attention-weight regularization to ensure scalability and numerical stability. These refinements directly respond to empirical and theoretical critiques of computational overhead and theoretical guarantees, achieving enhanced scalability and robustness to real-world dataset sizes.",
5
+ "statement": "HIRE-Net represents a significant advancement in 3D point cloud classification by overcoming key limitations of prior art through (1) the integration of multi-alignments rotation-equivariant convolutions, inspired by recent SO(3) transformations literature, for scalable and robust local embeddings, and (2) a novel edge-aware embedding mechanism utilizing attention weight normalization for efficient computation and noise resilience. Theoretical contributions include providing rotation-equivariant local descriptors in alignment with group convolution theory and mathematically justifying the stability of attention-based global aggregation with regularized energy functions. These contributions address previous critiques on computational inefficiency and lack of theoretical support, producing a framework that ensures robustness under rotations, scalability, and detailed geometric feature preservation.",
6
+ "method": "### System Architecture\n#### Overview\nThe HIRE-Net framework builds a hierarchical system for processing 3D point clouds, ensuring efficient and robust feature learning. It features two key innovations:\n1. **Multi-Alignments Rotation-Equivariant Local Encoding (RE-MA):** Extends rotation-equivariant convolutions by integrating multiple rotational alignments, creating invariant local embeddings that maintain robustness across arbitrary transformations.\n2. **Efficient Edge-Aware Global Aggregation (EEGA):** Employs edge-aware attention pooling with energy-based normalization to aggregate global features, ensuring numerical stability and computational efficiency.\n\nThe modular pipeline improves scalability and guarantees consistent interaction between components while addressing empirically observed shortcomings such as rotation-induced artifacts, noise sensitivity, and inefficiencies in large datasets.\n\n#### Method Components\n1. **Multi-Alignments Rotation-Equivariant Local Encoding (RE-MA):**\n - For each input point cloud, apply group-equivariant convolutions over local neighborhoods using multiple SO(3) alignments:\n \\[\n \\phi_{local,j}(\\mathbf{p}_i) = \\sigma\\left( W_j * T_{g_j}(\\mathbf{p}_i) \\right), \\quad g_j \\in SO(3)\n \\]\n - Here, \\(g_j\\) represents one of \\(M\\) discrete rotational alignments, \\(T_{g_j}\\) is the transformation under \\(g_j\\), and \\(W_j\\) are learnable convolution parameters for the \\(j^{th}\\) alignment.\n - Aggregate features over \\(M\\) alignments:\n \\[\n \\phi_{local}(\\mathbf{p}_i) = \\text{Max/Mean-Pooling}_{j=1}^M \\left( \\phi_{local,j}(\\mathbf{p}_i) \\right).\n \\]\n - This strategy retains rotational equivariance while reducing artifacts induced by single-group alignment discretizations.\n\n2. **Efficient Edge-Aware Global Aggregation (EEGA):**\n - Define edge features as:\n \\[\n \\mathbf{E}_i = \\sum_{\\mathbf{p}_j \\in \\mathcal{N}(\\mathbf{p}_i)} \\alpha(\\mathbf{p}_i, \\mathbf{p}_j) \\left( \\phi_{local}(\\mathbf{p}_j) - \\phi_{local}(\\mathbf{p}_i) \\right),\n \\]\n where \\(\\alpha(\\mathbf{p}_i, \\mathbf{p}_j)\\) is the attention weight given by:\n \\[\n \\alpha(\\mathbf{p}_i, \\mathbf{p}_j) = \\frac{\\exp(-||\\mathbf{p}_i - \\mathbf{p}_j||_2^2)}{\\sum_{\\mathbf{p}_k \\in \\mathcal{N}(\\mathbf{p}_i)} \\exp(-||\\mathbf{p}_i - \\mathbf{p}_k||_2^2)}.\n \\]\n - Enforce stability via attention-weight normalization, ensuring that any aggregated contribution adheres to bounded energy constraints:\n \\[\n \\sum_{\\mathbf{p}_j \\in \\mathcal{N}(\\mathbf{p}_i)} \\alpha(\\mathbf{p}_i, \\mathbf{p}_j)^2 \\leq \\epsilon,\n \\]\n where \\(\\epsilon\\) is a predefined regularization parameter ensuring computational stability in large-scale scenarios.\n\n3. **Hierarchical Fusion for Final Classification:**\n - Compute the global embedding via edge-aware pooling:\n \\[\n \\mathbf{F}_{global} = \\text{Max-Pool}\\left( \\{ \\mathbf{E}_i \\}_{i=1}^N \\right).\n \\]\n - Integrate multi-scale features adaptively:\n \\[\n \\mathbf{F}_{final} = f_{ACDM}(\\mathbf{F}_{local}, \\mathbf{F}_{global}),\n \\]\n where \\(f_{ACDM}(\\cdot)\\) is an attention-based fusion mechanism. Weighted contributions are dynamically learned based on the relevance of local versus global embeddings.\n - Class prediction is performed using softmax activation over the fused vector \\(\\mathbf{F}_{final}\\):\n \\[\n \\hat{y} = \\text{Softmax}(W_{cls} \\mathbf{F}_{final}).\n \\]\n\n#### Theoretical Properties\n1. 
**Rotation-Equivariance:** Multi-alignment convolutions ensure that local descriptors are consistent across full rotations in SO(3).\n2. **Numerical Stability:** Regularization of attention weights in EEGA prevents numerical instabilities that arise in softmax computations over large neighborhoods, guaranteeing scalability.\n3. **Computational Complexity:** The hierarchical pipeline scales as \\(O(NkM)\\), with \\(k\\) being the neighborhood size and \\(M\\) the number of alignments, ensuring competitive efficiency even for large-scale point clouds.\n\n#### Summary Algorithm\n**Algorithm 1: HIRE-Net for 3D Point Cloud Classification**\n1. **Input:** Point cloud \\(P = \\{ \\mathbf{p}_i \\}_{i=1}^N\\).\n2. Compute multi-alignment RE-MA features for each point.\n3. Identify local neighborhoods \\(\\mathcal{N}(\\mathbf{p}_i)\\) via k-nearest neighbors.\n4. Compute edge-aware features with EEGA using attention-weight normalization.\n5. Aggregate global embeddings via max-pooling.\n6. Fuse local and global features adaptively.\n7. Perform final classification using a fully connected layer and softmax.\n8. **Output:** Predicted class label \\(\\hat{y}\\).\n\nThis refined framework achieves a balance of mathematical rigor, novel insights, and practical feasibility, addressing previous shortcomings while providing a modular, scalable approach for 3D point cloud classification."
7
+ }
examples/AutoCls3D_ModelNet40/HIRE-Net/launcher.sh ADDED
@@ -0,0 +1,5 @@
1
+ python experiment.py \
2
+ --out_dir run_1 \
3
+ --data_root ./datasets/modelnet40 \
4
+ --max_epoch 200 \
5
+ --val_per_epoch 5
examples/AutoCls3D_ModelNet40/HIRE-Net/metrics.py ADDED
@@ -0,0 +1,311 @@
1
+ from math import log10
2
+ import numpy as np
3
+ import torch
4
+ from sklearn.metrics import confusion_matrix
5
+ import logging
6
+
7
+
8
+ def PSNR(mse, peak=1.):
9
+ return 10 * log10((peak ** 2) / mse)
10
+
11
+
12
+ class SegMetric:
+     def __init__(self, values):
+         assert isinstance(values, dict)
+         self.miou = values.get('miou', None)
+         self.oa = values.get('oa', None)
+         self.acc = values.get('acc', None)
+
+     def better_than(self, other):
+         return self.acc > other.acc
+
+     def state_dict(self):
+         return {'acc': self.acc}
31
+
32
+
33
+ class AverageMeter(object):
34
+ """Computes and stores the average and current value"""
35
+ def __init__(self):
36
+ self.reset()
37
+
38
+ def reset(self):
39
+ self.val = 0
40
+ self.avg = 0
41
+ self.sum = 0
42
+ self.count = 0
43
+
44
+ def update(self, val, n=1):
45
+ self.val = val
46
+ self.sum += val * n
47
+ self.count += n
48
+ self.avg = self.sum / self.count
49
+
50
+
51
+ class ConfusionMatrix:
52
+ """Accumulate a confusion matrix for a classification task.
53
+ ignore_index only supports index <0, or > num_classes
54
+ """
55
+
56
+ def __init__(self, num_classes, ignore_index=None):
57
+ self.value = 0
58
+ self.num_classes = num_classes
59
+ self.virtual_num_classes = num_classes + 1 if ignore_index is not None else num_classes
60
+ self.ignore_index = ignore_index
61
+
62
+ @torch.no_grad()
63
+ def update(self, pred, true):
64
+ """Update the confusion matrix with the given predictions."""
65
+ true = true.flatten()
66
+ pred = pred.flatten()
67
+ if self.ignore_index is not None:
68
+ if (true == self.ignore_index).sum() > 0:
69
+ pred[true == self.ignore_index] = self.virtual_num_classes -1
70
+ true[true == self.ignore_index] = self.virtual_num_classes -1
71
+ unique_mapping = true.flatten() * self.virtual_num_classes + pred.flatten()
72
+ bins = torch.bincount(unique_mapping, minlength=self.virtual_num_classes**2)
73
+ self.value += bins.view(self.virtual_num_classes, self.virtual_num_classes)[:self.num_classes, :self.num_classes]
74
+
75
+ def reset(self):
76
+ """Reset all accumulated values."""
77
+ self.value = 0
78
+
79
+ @property
80
+ def tp(self):
81
+ """Get the true positive samples per-class."""
82
+ return self.value.diag()
83
+
84
+ @property
85
+ def actual(self):
86
+ """Get the false negative samples per-class."""
87
+ return self.value.sum(dim=1)
88
+
89
+ @property
90
+ def predicted(self):
91
+ """Get the false negative samples per-class."""
92
+ return self.value.sum(dim=0)
93
+
94
+ @property
95
+ def fn(self):
96
+ """Get the false negative samples per-class."""
97
+ return self.actual - self.tp
98
+
99
+ @property
100
+ def fp(self):
101
+ """Get the false positive samples per-class."""
102
+ return self.predicted - self.tp
103
+
104
+ @property
105
+ def tn(self):
106
+ """Get the true negative samples per-class."""
107
+ actual = self.actual
108
+ predicted = self.predicted
109
+ return actual.sum() + self.tp - (actual + predicted)
110
+
111
+ @property
112
+ def count(self): # a.k.a. actual positive class
113
+ """Get the number of samples per-class."""
114
+ # return self.tp + self.fn
115
+ return self.value.sum(dim=1)
116
+
117
+ @property
118
+ def frequency(self):
119
+ """Get the per-class frequency."""
120
+ # we avoid dividing by zero using: max(denomenator, 1)
121
+ # return self.count / self.total.clamp(min=1)
122
+ count = self.value.sum(dim=1)
123
+ return count / count.sum().clamp(min=1)
124
+
125
+ @property
126
+ def total(self):
127
+ """Get the total number of samples."""
128
+ return self.value.sum()
129
+
130
+ @property
131
+ def overall_accuracy(self):
132
+ return self.tp.sum() / self.total
133
+
134
+ @property
135
+ def union(self):
136
+ return self.value.sum(dim=0) + self.value.sum(dim=1) - self.value.diag()
137
+
138
+ def all_acc(self):
139
+ return self.cal_acc(self.tp, self.count)
140
+
141
+ @staticmethod
142
+ def cal_acc(tp, count):
143
+ acc_per_cls = tp / count.clamp(min=1) * 100
144
+ over_all_acc = tp.sum() / count.sum() * 100
145
+ macc = torch.mean(acc_per_cls) # class accuracy
146
+ return macc.item(), over_all_acc.item(), acc_per_cls.cpu().numpy()
147
+
148
+ @staticmethod
149
+ def print_acc(accs):
150
+ out = '\n Class ' + ' Acc '
151
+ for i, values in enumerate(accs):
152
+ out += '\n' + str(i).rjust(8) + f'{values.item():.2f}'.rjust(8)
153
+ out += '\n' + '-' * 20
154
+ out += '\n' + ' Mean ' + f'{torch.mean(accs).item():.2f}'.rjust(8)
155
+ logging.info(out)
156
+
157
+ def all_metrics(self):
158
+ tp, fp, fn = self.tp, self.fp, self.fn,
159
+
160
+ iou_per_cls = tp / (tp + fp + fn).clamp(min=1) * 100
161
+ acc_per_cls = tp / self.count.clamp(min=1) * 100
162
+ over_all_acc = tp.sum() / self.total * 100
163
+
164
+ miou = torch.mean(iou_per_cls)
165
+ macc = torch.mean(acc_per_cls) # class accuracy
166
+ return miou.item(), macc.item(), over_all_acc.item(), iou_per_cls.cpu().numpy(), acc_per_cls.cpu().numpy()
167
+
168
+
169
+ def get_mious(tp, union, count):
170
+ iou_per_cls = (tp + 1e-10) / (union + 1e-10) * 100
171
+ acc_per_cls = (tp + 1e-10) / (count + 1e-10) * 100
172
+ over_all_acc = tp.sum() / count.sum() * 100
173
+
174
+ miou = torch.mean(iou_per_cls)
175
+ macc = torch.mean(acc_per_cls) # class accuracy
176
+ return miou.item(), macc.item(), over_all_acc.item(), iou_per_cls.cpu().numpy(), acc_per_cls.cpu().numpy()
177
+
178
+
179
+ def partnet_metrics(num_classes, num_parts, objects, preds, targets):
180
+ """
181
+
182
+ Args:
183
+ num_classes:
184
+ num_parts:
185
+ objects: [int]
186
+ preds:[(num_parts,num_points)]
187
+ targets: [(num_points)]
188
+
189
+ Returns:
190
+
191
+ """
192
+ shape_iou_tot = [0.0] * num_classes
193
+ shape_iou_cnt = [0] * num_classes
194
+ part_intersect = [np.zeros((num_parts[o_l]), dtype=np.float32) for o_l in range(num_classes)]
195
+ part_union = [np.zeros((num_parts[o_l]), dtype=np.float32) + 1e-6 for o_l in range(num_classes)]
196
+
197
+ for obj, cur_pred, cur_gt in zip(objects, preds, targets):
198
+ cur_num_parts = num_parts[obj]
199
+ cur_pred = np.argmax(cur_pred[1:, :], axis=0) + 1
200
+ cur_pred[cur_gt == 0] = 0
201
+ cur_shape_iou_tot = 0.0
202
+ cur_shape_iou_cnt = 0
203
+ for j in range(1, cur_num_parts):
204
+ cur_gt_mask = (cur_gt == j)
205
+ cur_pred_mask = (cur_pred == j)
206
+
207
+ has_gt = (np.sum(cur_gt_mask) > 0)
208
+ has_pred = (np.sum(cur_pred_mask) > 0)
209
+
210
+ if has_gt or has_pred:
211
+ intersect = np.sum(cur_gt_mask & cur_pred_mask)
212
+ union = np.sum(cur_gt_mask | cur_pred_mask)
213
+ iou = intersect / union
214
+
215
+ cur_shape_iou_tot += iou
216
+ cur_shape_iou_cnt += 1
217
+
218
+ part_intersect[obj][j] += intersect
219
+ part_union[obj][j] += union
220
+ if cur_shape_iou_cnt > 0:
221
+ cur_shape_miou = cur_shape_iou_tot / cur_shape_iou_cnt
222
+ shape_iou_tot[obj] += cur_shape_miou
223
+ shape_iou_cnt[obj] += 1
224
+
225
+ msIoU = [shape_iou_tot[o_l] / shape_iou_cnt[o_l] for o_l in range(num_classes)]
226
+ part_iou = [np.divide(part_intersect[o_l][1:], part_union[o_l][1:]) for o_l in range(num_classes)]
227
+ mpIoU = [np.mean(part_iou[o_l]) for o_l in range(num_classes)]
228
+
229
+ # Print instance mean
230
+ mmsIoU = np.mean(np.array(msIoU))
231
+ mmpIoU = np.mean(mpIoU)
232
+
233
+ return msIoU, mpIoU, mmsIoU, mmpIoU
234
+
235
+
236
+ def IoU_from_confusions(confusions):
237
+ """
238
+ Computes IoU from confusion matrices.
239
+ :param confusions: ([..., n_c, n_c] np.int32). Can be any dimension, the confusion matrices should be described by
240
+ the last axes. n_c = number of classes
241
+ :param ignore_unclassified: (bool). True if the first class should be ignored in the results (not used by this implementation)
242
+ :return: ([..., n_c] np.float32) IoU score
243
+ """
244
+
245
+ # Compute TP, FP, FN. This assumes that the second to last axis counts the truths (like the first axis of a
246
+ # confusion matrix), and that the last axis counts the predictions (like the second axis of a confusion matrix)
247
+ TP = np.diagonal(confusions, axis1=-2, axis2=-1)
248
+ TP_plus_FN = np.sum(confusions, axis=-1)
249
+ TP_plus_FP = np.sum(confusions, axis=-2)
250
+
251
+ # Compute IoU
252
+ IoU = TP / (TP_plus_FP + TP_plus_FN - TP + 1e-6)
253
+
254
+ # Compute miou with only the actual classes
255
+ mask = TP_plus_FN < 1e-3
256
+ counts = np.sum(1 - mask, axis=-1, keepdims=True)
257
+ miou = np.sum(IoU, axis=-1, keepdims=True) / (counts + 1e-6)
258
+
259
+ # If class is absent, place miou in place of 0 IoU to get the actual mean later
260
+ IoU += mask * miou
261
+
262
+ return IoU
263
+
264
+
265
+ def shapenetpart_metrics(num_classes, num_parts, objects, preds, targets, masks):
266
+ """
267
+ Args:
268
+         num_classes: number of shape categories
269
+         num_parts: [int] number of parts for each category
270
+         objects: [int] category id of each shape
271
+         preds: [(num_parts, num_points)] per-point part logits for each shape
272
+         targets: [(num_points)] ground-truth part labels for each shape
273
+         masks: [(num_points)] boolean mask of valid points for each shape
274
+ """
275
+ total_correct = 0.0
276
+ total_seen = 0.0
277
+ Confs = []
278
+ for obj, cur_pred, cur_gt, cur_mask in zip(objects, preds, targets, masks):
279
+ obj = int(obj)
280
+ cur_num_parts = num_parts[obj]
281
+ cur_pred = np.argmax(cur_pred, axis=0)
282
+ cur_pred = cur_pred[cur_mask]
283
+ cur_gt = cur_gt[cur_mask]
284
+ correct = np.sum(cur_pred == cur_gt)
285
+ total_correct += correct
286
+ total_seen += cur_pred.shape[0]
287
+ parts = [j for j in range(cur_num_parts)]
288
+ Confs += [confusion_matrix(cur_gt, cur_pred, labels=parts)]
289
+
290
+ Confs = np.array(Confs)
291
+ obj_mious = []
292
+ objects = np.asarray(objects)
293
+ for l in range(num_classes):
294
+ obj_inds = np.where(objects == l)[0]
295
+ obj_confs = np.stack(Confs[obj_inds])
296
+ obj_IoUs = IoU_from_confusions(obj_confs)
297
+ obj_mious += [np.mean(obj_IoUs, axis=-1)]
298
+
299
+ objs_average = [np.mean(mious) for mious in obj_mious]
300
+ instance_average = np.mean(np.hstack(obj_mious))
301
+ class_average = np.mean(objs_average)
302
+ acc = total_correct / total_seen
303
+
304
+ print('Objs | Inst | Air Bag Cap Car Cha Ear Gui Kni Lam Lap Mot Mug Pis Roc Ska Tab')
305
+ print('-----|------|--------------------------------------------------------------------------------')
306
+
307
+ s = '{:4.1f} | {:4.1f} | '.format(100 * class_average, 100 * instance_average)
308
+ for Amiou in objs_average:
309
+ s += '{:4.1f} '.format(100 * Amiou)
310
+ print(s + '\n')
311
+ return acc, objs_average, class_average, instance_average
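
A quick way to sanity-check the TP/FP/FN bookkeeping in `IoU_from_confusions` above is to re-inline the formula on a toy 2-class confusion matrix. The sketch below is an editorial illustration under that assumption, not part of the uploaded files:

```python
import numpy as np

# Toy confusion matrix: rows count ground truth, columns count predictions.
# Class 0: 3 points correct, 1 point mislabeled as class 1. Class 1: 2 points correct.
conf = np.array([[3, 1],
                 [0, 2]], dtype=np.int32)

TP = np.diagonal(conf, axis1=-2, axis2=-1)   # [3, 2]
TP_plus_FN = np.sum(conf, axis=-1)           # row sums: [4, 2]
TP_plus_FP = np.sum(conf, axis=-2)           # column sums: [3, 3]
IoU = TP / (TP_plus_FP + TP_plus_FN - TP + 1e-6)

# Hand check: class 0 -> 3 / (3 + 4 - 3) = 0.75; class 1 -> 2 / (3 + 2 - 2) = 2/3.
assert np.allclose(IoU, [0.75, 2 / 3], atol=1e-5)
```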
examples/AutoCls3D_ModelNet40/HIRE-Net/res/best.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:82ce50804d09d4fb266301c13d75ef3c794cd14adc8513615b367022af8ef16e
3
+ size 14006197
examples/AutoCls3D_ModelNet40/HIRE-Net/res/final_info.json ADDED
@@ -0,0 +1 @@
1
+ {"modelnet40": {"means": {"best_oa": 95.50243377685547, "best_acc": 92.41918182373047, "epoch": 70}}}
examples/AutoClsSST_SST-2/Baseline/experiment.py ADDED
@@ -0,0 +1,490 @@
1
+ import os
2
+ import logging
3
+ from dataclasses import dataclass
4
+ from typing import Optional, Tuple, List, Dict, Any
5
+ import time
6
+ import json
7
+ import pathlib
8
+ from tqdm import tqdm
9
+ import pandas as pd
10
+ import numpy as np
11
+ import argparse
12
+ import torch
13
+ from torch import nn
14
+ from torch.utils.data import DataLoader, Dataset
15
+ from transformers import (
16
+ get_linear_schedule_with_warmup,
17
+ BertForSequenceClassification,
18
+ AutoTokenizer,
19
+ AdamW
20
+ )
21
+ from sklearn.metrics import roc_auc_score
22
+
23
+ import traceback
24
+
25
+
26
+ logging.basicConfig(
27
+ format='%(asctime)s - %(levelname)s - %(message)s',
28
+ level=logging.INFO,
29
+ handlers=[
30
+ logging.FileHandler('training.log'),
31
+ logging.StreamHandler()
32
+ ]
33
+ )
34
+ logger = logging.getLogger(__name__)
35
+
36
+
37
+ @dataclass
38
+ class TrainingConfig:
39
+ max_seq_len: int = 50
40
+ epochs: int = 3
41
+ batch_size: int = 32
42
+ learning_rate: float = 2e-5
43
+ patience: int = 1
44
+ max_grad_norm: float = 10.0
45
+ warmup_ratio: float = 0.1
46
+ model_path: str = '/cpfs01/shared/MA4Tool/hug_ckpts/BERT_ckpt'
47
+ num_labels: int = 2
48
+ if_save_model: bool = True
49
+ out_dir: str = './run_0'
50
+
51
+ def validate(self) -> None:
52
+ if self.max_seq_len <= 0:
53
+ raise ValueError("max_seq_len must be positive")
54
+ if self.epochs <= 0:
55
+ raise ValueError("epochs must be positive")
56
+ if self.batch_size <= 0:
57
+ raise ValueError("batch_size must be positive")
58
+ if not (0.0 < self.learning_rate):
59
+             raise ValueError("learning_rate must be positive")
60
+
61
+
62
+ class DataPrecessForSentence(Dataset):
63
+ def __init__(self, bert_tokenizer: AutoTokenizer, df: pd.DataFrame, max_seq_len: int = 50):
64
+ self.bert_tokenizer = bert_tokenizer
65
+ self.max_seq_len = max_seq_len
66
+ self.input_ids, self.attention_mask, self.token_type_ids, self.labels = self._get_input(df)
67
+
68
+ def __len__(self) -> int:
69
+ return len(self.labels)
70
+
71
+ def __getitem__(self, idx: int) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
72
+ return (
73
+ self.input_ids[idx],
74
+ self.attention_mask[idx],
75
+ self.token_type_ids[idx],
76
+ self.labels[idx]
77
+ )
78
+
79
+ def _get_input(self, df: pd.DataFrame) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
80
+ sentences = df['s1'].values
81
+ labels = df['similarity'].values
82
+
83
+ tokens_seq = list(map(self.bert_tokenizer.tokenize, sentences))
84
+ result = list(map(self._truncate_and_pad, tokens_seq))
85
+
86
+ input_ids = torch.tensor([i[0] for i in result], dtype=torch.long)
87
+ attention_mask = torch.tensor([i[1] for i in result], dtype=torch.long)
88
+ token_type_ids = torch.tensor([i[2] for i in result], dtype=torch.long)
89
+ labels = torch.tensor(labels, dtype=torch.long)
90
+
91
+ return input_ids, attention_mask, token_type_ids, labels
92
+
93
+ def _truncate_and_pad(self, tokens_seq: List[str]) -> Tuple[List[int], List[int], List[int]]:
94
+ tokens_seq = ['[CLS]'] + tokens_seq[:self.max_seq_len - 1]
95
+ padding_length = self.max_seq_len - len(tokens_seq)
96
+
97
+ input_ids = self.bert_tokenizer.convert_tokens_to_ids(tokens_seq)
98
+ input_ids += [0] * padding_length
99
+ attention_mask = [1] * len(tokens_seq) + [0] * padding_length
100
+ token_type_ids = [0] * self.max_seq_len
101
+
102
+ return input_ids, attention_mask, token_type_ids
103
+
104
+
105
+ class BertClassifier(nn.Module):
106
+ def __init__(self, model_path: str, num_labels: int, requires_grad: bool = True):
107
+ super().__init__()
108
+ try:
109
+ self.bert = BertForSequenceClassification.from_pretrained(
110
+ model_path,
111
+ num_labels=num_labels
112
+ )
113
+ self.tokenizer = AutoTokenizer.from_pretrained(model_path)
114
+ except Exception as e:
115
+ logger.error(f"Failed to load BERT model: {e}")
116
+ raise
117
+
118
+ self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
119
+
120
+ for param in self.bert.parameters():
121
+ param.requires_grad = requires_grad
122
+
123
+ def forward(
124
+ self,
125
+ batch_seqs: torch.Tensor,
126
+ batch_seq_masks: torch.Tensor,
127
+ batch_seq_segments: torch.Tensor,
128
+ labels: torch.Tensor
129
+ ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
130
+ loss, logits = self.bert(
131
+ input_ids=batch_seqs,
132
+ attention_mask=batch_seq_masks,
133
+ token_type_ids=batch_seq_segments,
134
+ labels=labels
135
+ )[:2]
136
+ probabilities = nn.functional.softmax(logits, dim=-1)
137
+ return loss, logits, probabilities
138
+
139
+
140
+ class BertTrainer:
141
+ def __init__(self, config: TrainingConfig):
142
+ self.config = config
143
+ self.config.validate()
144
+ self.model = BertClassifier(config.model_path, config.num_labels)
145
+ self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
146
+ self.model.to(self.device)
147
+
148
+ def _prepare_data(
149
+ self,
150
+ train_df: pd.DataFrame,
151
+ dev_df: pd.DataFrame,
152
+ test_df: pd.DataFrame
153
+ ) -> Tuple[DataLoader, DataLoader, DataLoader]:
154
+ train_data = DataPrecessForSentence(
155
+ self.model.tokenizer,
156
+ train_df,
157
+ max_seq_len=self.config.max_seq_len
158
+ )
159
+ train_loader = DataLoader(
160
+ train_data,
161
+ shuffle=True,
162
+ batch_size=self.config.batch_size
163
+ )
164
+
165
+ dev_data = DataPrecessForSentence(
166
+ self.model.tokenizer,
167
+ dev_df,
168
+ max_seq_len=self.config.max_seq_len
169
+ )
170
+ dev_loader = DataLoader(
171
+ dev_data,
172
+ shuffle=False,
173
+ batch_size=self.config.batch_size
174
+ )
175
+
176
+ test_data = DataPrecessForSentence(
177
+ self.model.tokenizer,
178
+ test_df,
179
+ max_seq_len=self.config.max_seq_len
180
+ )
181
+ test_loader = DataLoader(
182
+ test_data,
183
+ shuffle=False,
184
+ batch_size=self.config.batch_size
185
+ )
186
+
187
+ return train_loader, dev_loader, test_loader
188
+
189
+ def _prepare_optimizer(self, num_training_steps: int) -> Tuple[AdamW, Any]:
190
+ param_optimizer = list(self.model.named_parameters())
191
+ no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
192
+ optimizer_grouped_parameters = [
193
+ {
194
+ 'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
195
+ 'weight_decay': 0.01
196
+ },
197
+ {
198
+ 'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
199
+ 'weight_decay': 0.0
200
+ }
201
+ ]
202
+
203
+ optimizer = AdamW(
204
+ optimizer_grouped_parameters,
205
+ lr=self.config.learning_rate
206
+ )
207
+
208
+ scheduler = get_linear_schedule_with_warmup(
209
+ optimizer,
210
+ num_warmup_steps=int(num_training_steps * self.config.warmup_ratio),
211
+ num_training_steps=num_training_steps
212
+ )
213
+
214
+ return optimizer, scheduler
215
+
216
+ def _initialize_training_stats(self) -> Dict[str, List]:
217
+ return {
218
+ 'epochs_count': [],
219
+ 'train_losses': [],
220
+ 'train_accuracies': [],
221
+ 'valid_losses': [],
222
+ 'valid_accuracies': [],
223
+ 'valid_aucs': []
224
+ }
225
+
226
+ def _update_training_stats(
227
+ self,
228
+ training_stats: Dict[str, List],
229
+ epoch: int,
230
+ train_metrics: Dict[str, float],
231
+ val_metrics: Dict[str, float]
232
+ ) -> None:
233
+ training_stats['epochs_count'].append(epoch)
234
+ training_stats['train_losses'].append(train_metrics['loss'])
235
+ training_stats['train_accuracies'].append(train_metrics['accuracy'])
236
+ training_stats['valid_losses'].append(val_metrics['loss'])
237
+ training_stats['valid_accuracies'].append(val_metrics['accuracy'])
238
+ training_stats['valid_aucs'].append(val_metrics['auc'])
239
+
240
+ logger.info(
241
+ f"Training - Loss: {train_metrics['loss']:.4f}, "
242
+ f"Accuracy: {train_metrics['accuracy'] * 100:.2f}%"
243
+ )
244
+ logger.info(
245
+ f"Validation - Loss: {val_metrics['loss']:.4f}, "
246
+ f"Accuracy: {val_metrics['accuracy'] * 100:.2f}%, "
247
+ f"AUC: {val_metrics['auc']:.4f}"
248
+ )
249
+
250
+ def _save_checkpoint(
251
+ self,
252
+ target_dir: str,
253
+ epoch: int,
254
+ optimizer: AdamW,
255
+ best_score: float,
256
+ training_stats: Dict[str, List]
257
+ ) -> None:
258
+ checkpoint = {
259
+ "epoch": epoch,
260
+ "model": self.model.state_dict(),
261
+ "optimizer": optimizer.state_dict(),
262
+ "best_score": best_score,
263
+ **training_stats
264
+ }
265
+ torch.save(
266
+ checkpoint,
267
+ os.path.join(target_dir, "best.pth.tar")
268
+ )
269
+ logger.info("Model saved successfully")
270
+
271
+ def _load_checkpoint(
272
+ self,
273
+ checkpoint_path: str,
274
+ optimizer: AdamW,
275
+ training_stats: Dict[str, List]
276
+ ) -> float:
277
+ checkpoint = torch.load(checkpoint_path)
278
+ self.model.load_state_dict(checkpoint["model"])
279
+ optimizer.load_state_dict(checkpoint["optimizer"])
280
+ for key in training_stats:
281
+ training_stats[key] = checkpoint[key]
282
+ logger.info(f"Loaded checkpoint from epoch {checkpoint['epoch']}")
283
+ return checkpoint["best_score"]
284
+
285
+ def _train_epoch(
286
+ self,
287
+ train_loader: DataLoader,
288
+ optimizer: AdamW,
289
+ scheduler: Any
290
+ ) -> Dict[str, float]:
291
+ self.model.train()
292
+ total_loss = 0
293
+ correct_preds = 0
294
+
295
+ for batch in tqdm(train_loader, desc="Training"):
296
+ batch = tuple(t.to(self.device) for t in batch)
297
+ input_ids, attention_mask, token_type_ids, labels = batch
298
+
299
+ optimizer.zero_grad()
300
+ loss, _, probabilities = self.model(input_ids, attention_mask, token_type_ids, labels)
301
+
302
+ loss.backward()
303
+ nn.utils.clip_grad_norm_(self.model.parameters(), self.config.max_grad_norm)
304
+
305
+ optimizer.step()
306
+ scheduler.step()
307
+
308
+ total_loss += loss.item()
309
+ correct_preds += (probabilities.argmax(dim=1) == labels).sum().item()
310
+
311
+ return {
312
+ 'loss': total_loss / len(train_loader),
313
+ 'accuracy': correct_preds / len(train_loader.dataset)
314
+ }
315
+
316
+ def _validate_epoch(self, dev_loader: DataLoader) -> Tuple[Dict[str, float], List[float]]:
317
+ self.model.eval()
318
+ total_loss = 0
319
+ correct_preds = 0
320
+ all_probs = []
321
+ all_labels = []
322
+
323
+ with torch.no_grad():
324
+ for batch in tqdm(dev_loader, desc="Validating"):
325
+ batch = tuple(t.to(self.device) for t in batch)
326
+ input_ids, attention_mask, token_type_ids, labels = batch
327
+
328
+ loss, _, probabilities = self.model(input_ids, attention_mask, token_type_ids, labels)
329
+
330
+ total_loss += loss.item()
331
+ correct_preds += (probabilities.argmax(dim=1) == labels).sum().item()
332
+ all_probs.extend(probabilities[:, 1].cpu().numpy())
333
+ all_labels.extend(labels.cpu().numpy())
334
+
335
+ metrics = {
336
+ 'loss': total_loss / len(dev_loader),
337
+ 'accuracy': correct_preds / len(dev_loader.dataset),
338
+ 'auc': roc_auc_score(all_labels, all_probs)
339
+ }
340
+
341
+ return metrics, all_probs
342
+
343
+ def _evaluate_test_set(
344
+ self,
345
+ test_loader: DataLoader,
346
+ target_dir: str,
347
+ epoch: int
348
+ ) -> None:
349
+ test_metrics, all_probs = self._validate_epoch(test_loader)
350
+ logger.info(f"Test accuracy: {test_metrics['accuracy'] * 100:.2f}%")
351
+
352
+ test_prediction = pd.DataFrame({'prob_1': all_probs})
353
+ test_prediction['prob_0'] = 1 - test_prediction['prob_1']
354
+ test_prediction['prediction'] = test_prediction.apply(
355
+ lambda x: 0 if (x['prob_0'] > x['prob_1']) else 1,
356
+ axis=1
357
+ )
358
+
359
+ output_path = os.path.join(target_dir, f"test_prediction_epoch_{epoch}.csv")
360
+ test_prediction.to_csv(output_path, index=False)
361
+ logger.info(f"Test predictions saved to {output_path}")
362
+
363
+ def train_and_evaluate(
364
+ self,
365
+ train_df: pd.DataFrame,
366
+ dev_df: pd.DataFrame,
367
+ test_df: pd.DataFrame,
368
+ target_dir: str,
369
+ checkpoint: Optional[str] = None
370
+ ) -> None:
371
+ try:
372
+ os.makedirs(target_dir, exist_ok=True)
373
+
374
+ train_loader, dev_loader, test_loader = self._prepare_data(
375
+ train_df, dev_df, test_df
376
+ )
377
+
378
+ optimizer, scheduler = self._prepare_optimizer(
379
+ len(train_loader) * self.config.epochs
380
+ )
381
+
382
+ training_stats = self._initialize_training_stats()
383
+ best_score = 0.0
384
+ patience_counter = 0
385
+
386
+ if checkpoint:
387
+ best_score = self._load_checkpoint(checkpoint, optimizer, training_stats)
388
+
389
+ for epoch in range(1, self.config.epochs + 1):
390
+ logger.info(f"Training epoch {epoch}")
391
+
392
+ # Train
393
+ train_metrics = self._train_epoch(train_loader, optimizer, scheduler)
394
+
395
+ # Val
396
+ val_metrics, _ = self._validate_epoch(dev_loader)
397
+
398
+ self._update_training_stats(training_stats, epoch, train_metrics, val_metrics)
399
+
400
+ # Saving / Early stopping
401
+ if val_metrics['accuracy'] > best_score:
402
+ best_score = val_metrics['accuracy']
403
+ patience_counter = 0
404
+ if self.config.if_save_model:
405
+ self._save_checkpoint(
406
+ target_dir,
407
+ epoch,
408
+ optimizer,
409
+ best_score,
410
+ training_stats
411
+ )
412
+ self._evaluate_test_set(test_loader, target_dir, epoch)
413
+ else:
414
+ patience_counter += 1
415
+ if patience_counter >= self.config.patience:
416
+ logger.info("Early stopping triggered")
417
+ break
418
+
419
+ final_infos = {
420
+ "sentiment": {
421
+ "means": {
422
+ "best_acc": best_score
423
+ }
424
+ }
425
+ }
426
+
427
+ with open(os.path.join(self.config.out_dir, "final_info.json"), "w") as f:
428
+ json.dump(final_infos, f)
429
+
430
+ except Exception as e:
431
+ logger.error(f"Training failed: {e}")
432
+ raise
433
+
434
+
435
+ def set_seed(seed: int = 42) -> None:
436
+ import random
437
+ random.seed(seed)
438
+ np.random.seed(seed)
439
+ torch.manual_seed(seed)
440
+ torch.cuda.manual_seed_all(seed)
441
+ torch.backends.cudnn.deterministic = True
442
+ torch.backends.cudnn.benchmark = False
443
+ os.environ['PYTHONHASHSEED'] = str(seed)
444
+
445
+
446
+ def main(out_dir):
447
+ try:
448
+ config = TrainingConfig(out_dir=out_dir)
449
+ pathlib.Path(config.out_dir).mkdir(parents=True, exist_ok=True)
450
+
451
+ data_path = "/cpfs01/shared/MA4Tool/datasets/SST-2/"
452
+ train_df = pd.read_csv(
453
+ os.path.join(data_path, "train.tsv"),
454
+ sep='\t',
455
+ header=None,
456
+ names=['similarity', 's1']
457
+ )
458
+ dev_df = pd.read_csv(
459
+ os.path.join(data_path, "dev.tsv"),
460
+ sep='\t',
461
+ header=None,
462
+ names=['similarity', 's1']
463
+ )
464
+ test_df = pd.read_csv(
465
+ os.path.join(data_path, "test.tsv"),
466
+ sep='\t',
467
+ header=None,
468
+ names=['similarity', 's1']
469
+ )
470
+
471
+ set_seed(2024)
472
+
473
+ trainer = BertTrainer(config)
474
+ trainer.train_and_evaluate(train_df, dev_df, test_df, "./output/Bert/")
475
+
476
+ except Exception as e:
477
+ logger.error(f"Program failed: {e}")
478
+ raise
479
+
480
+
481
+ if __name__ == "__main__":
482
+ parser = argparse.ArgumentParser()
483
+ parser.add_argument("--out_dir", type=str, default="run_0")
484
+ args = parser.parse_args()
485
+ try:
486
+ main(args.out_dir)
487
+ except Exception as e:
488
+ print("Original error in subprocess:", flush=True)
489
+ traceback.print_exc(file=open(os.path.join(args.out_dir, "traceback.log"), "w"))
490
+ raise
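
The fixed-length encoding in `DataPrecessForSentence._truncate_and_pad` above (prepend `[CLS]`, keep at most `max_seq_len - 1` tokens, pad ids and mask with zeros, and, matching the code, no `[SEP]`) can be checked without a real tokenizer. A minimal sketch with a hypothetical toy vocabulary (`vocab` and `MAX_LEN` are illustrative stand-ins, not part of the uploaded files):

```python
MAX_LEN = 8  # stands in for TrainingConfig.max_seq_len
vocab = {'[CLS]': 101, 'the': 1, 'movie': 2, 'was': 3, 'great': 4}  # toy vocab

def truncate_and_pad(tokens):
    # Mirrors the dataset logic: prepend [CLS], truncate to MAX_LEN - 1 tokens, pad with id 0.
    tokens = ['[CLS]'] + tokens[:MAX_LEN - 1]
    pad = MAX_LEN - len(tokens)
    input_ids = [vocab[t] for t in tokens] + [0] * pad
    attention_mask = [1] * len(tokens) + [0] * pad
    token_type_ids = [0] * MAX_LEN
    return input_ids, attention_mask, token_type_ids

ids, mask, segments = truncate_and_pad(['the', 'movie', 'was', 'great'])
assert ids == [101, 1, 2, 3, 4, 0, 0, 0]
assert mask == [1, 1, 1, 1, 1, 0, 0, 0]
assert segments == [0] * MAX_LEN
```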
examples/AutoClsSST_SST-2/Baseline/final_info.json ADDED
@@ -0,0 +1 @@
1
+ {"sentiment": {"means": {"best_acc": 0.9105504587155964}}}
examples/AutoClsSST_SST-2/Baseline/launcher.sh ADDED
@@ -0,0 +1 @@
1
+ python experiment.py
examples/AutoClsSST_SST-2/Transformer-Hybrid-Augmentation-Sentiment/experiment.py ADDED
@@ -0,0 +1,744 @@
1
+ import os
2
+ import logging
3
+ import math
4
+ from dataclasses import dataclass, field
5
+ from typing import Optional, Tuple, List, Dict, Any
6
+ import time
7
+ import json
8
+ import pathlib
9
+ from tqdm import tqdm
10
+ import pandas as pd
11
+ import numpy as np
12
+ import argparse
13
+ import torch
14
+ from torch import nn
15
+ from torch.utils.data import DataLoader, Dataset
16
+ from transformers import (
17
+ get_linear_schedule_with_warmup,
18
+ BertForSequenceClassification,
19
+ AutoTokenizer,
20
+ AdamW
21
+ )
22
+ from sklearn.metrics import roc_auc_score, f1_score, precision_score, recall_score
23
+
24
+ import traceback
25
+ from psycholinguistic_utils import PsycholinguisticFeatures, LinguisticRules, HybridNoiseAugmentation
26
+
27
+
28
+ logging.basicConfig(
29
+ format='%(asctime)s - %(levelname)s - %(message)s',
30
+ level=logging.INFO,
31
+ handlers=[
32
+ logging.FileHandler('training.log'),
33
+ logging.StreamHandler()
34
+ ]
35
+ )
36
+ logger = logging.getLogger(__name__)
37
+
38
+
39
+ @dataclass
40
+ class TrainingConfig:
41
+ max_seq_len: int = 50
42
+ epochs: int = 3
43
+ batch_size: int = 32
44
+ learning_rate: float = 2e-5
45
+ patience: int = 1
46
+ max_grad_norm: float = 10.0
47
+ warmup_ratio: float = 0.1
48
+ model_path: str = './hug_ckpts/BERT_ckpt'
49
+ num_labels: int = 2
50
+ if_save_model: bool = True
51
+ out_dir: str = './run_1'
52
+
53
+ # Hybrid noise augmentation parameters
54
+ use_hybrid_augmentation: bool = True
55
+ sigma: float = 0.1 # Gaussian noise scaling factor
56
+ alpha: float = 0.5 # Hybrid weight
57
+ gamma: float = 0.1 # Attention adjustment parameter
58
+
59
+ # Evaluation parameters
60
+ evaluate_adversarial: bool = True
61
+ adversarial_types: List[str] = field(default_factory=lambda: ['sarcasm', 'negation', 'polysemy'])
62
+
63
+ def validate(self) -> None:
64
+ if self.max_seq_len <= 0:
65
+ raise ValueError("max_seq_len must be positive")
66
+ if self.epochs <= 0:
67
+ raise ValueError("epochs must be positive")
68
+ if self.batch_size <= 0:
69
+ raise ValueError("batch_size must be positive")
70
+ if not (0.0 < self.learning_rate):
71
+             raise ValueError("learning_rate must be positive")
72
+ if not (0.0 <= self.sigma <= 1.0):
73
+ raise ValueError("sigma must be between 0 and 1")
74
+ if not (0.0 <= self.alpha <= 1.0):
75
+ raise ValueError("alpha must be between 0 and 1")
76
+ if not (0.0 <= self.gamma <= 1.0):
77
+ raise ValueError("gamma must be between 0 and 1")
78
+
79
+
80
+ class DataPrecessForSentence(Dataset):
81
+ def __init__(self, bert_tokenizer: AutoTokenizer, df: pd.DataFrame, max_seq_len: int = 50):
82
+ self.bert_tokenizer = bert_tokenizer
83
+ self.max_seq_len = max_seq_len
84
+ self.input_ids, self.attention_mask, self.token_type_ids, self.labels = self._get_input(df)
85
+ self.raw_texts = df['s1'].values # Save original text for noise augmentation
86
+
87
+ def __len__(self) -> int:
88
+ return len(self.labels)
89
+
90
+ def __getitem__(self, idx: int) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, str]:
91
+ return (
92
+ self.input_ids[idx],
93
+ self.attention_mask[idx],
94
+ self.token_type_ids[idx],
95
+ self.labels[idx],
96
+ self.raw_texts[idx] # Return original text
97
+ )
98
+
99
+ def _get_input(self, df: pd.DataFrame) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
100
+ sentences = df['s1'].values
101
+ labels = df['similarity'].values
102
+
103
+ tokens_seq = list(map(self.bert_tokenizer.tokenize, sentences))
104
+ result = list(map(self._truncate_and_pad, tokens_seq))
105
+
106
+ input_ids = torch.tensor([i[0] for i in result], dtype=torch.long)
107
+ attention_mask = torch.tensor([i[1] for i in result], dtype=torch.long)
108
+ token_type_ids = torch.tensor([i[2] for i in result], dtype=torch.long)
109
+ labels = torch.tensor(labels, dtype=torch.long)
110
+
111
+ return input_ids, attention_mask, token_type_ids, labels
112
+
113
+ def _truncate_and_pad(self, tokens_seq: List[str]) -> Tuple[List[int], List[int], List[int]]:
114
+ tokens_seq = ['[CLS]'] + tokens_seq[:self.max_seq_len - 1]
115
+ padding_length = self.max_seq_len - len(tokens_seq)
116
+
117
+ input_ids = self.bert_tokenizer.convert_tokens_to_ids(tokens_seq)
118
+ input_ids += [0] * padding_length
119
+ attention_mask = [1] * len(tokens_seq) + [0] * padding_length
120
+ token_type_ids = [0] * self.max_seq_len
121
+
122
+ return input_ids, attention_mask, token_type_ids
123
+
124
+
125
+ class BertClassifier(nn.Module):
126
+ def __init__(
127
+ self,
128
+ model_path: str,
129
+ num_labels: int,
130
+ requires_grad: bool = True,
131
+ use_hybrid_augmentation: bool = True,
132
+ sigma: float = 0.1,
133
+ alpha: float = 0.5,
134
+ gamma: float = 0.1
135
+ ):
136
+ super().__init__()
137
+ try:
138
+ self.bert = BertForSequenceClassification.from_pretrained(
139
+ model_path,
140
+ num_labels=num_labels
141
+ )
142
+ self.tokenizer = AutoTokenizer.from_pretrained(model_path)
143
+ except Exception as e:
144
+ logger.error(f"Failed to load BERT model: {e}")
145
+ raise
146
+
147
+ self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
148
+
149
+ # Hybrid noise augmentation settings
150
+ self.use_hybrid_augmentation = use_hybrid_augmentation
151
+ if use_hybrid_augmentation:
152
+ self.hybrid_augmentation = HybridNoiseAugmentation(
153
+ sigma=sigma,
154
+ alpha=alpha,
155
+ gamma=gamma
156
+ )
157
+
158
+ for param in self.bert.parameters():
159
+ param.requires_grad = requires_grad
160
+
161
+ def _apply_hybrid_augmentation(
162
+ self,
163
+ embeddings: torch.Tensor,
164
+ attention_mask: torch.Tensor,
165
+ texts: List[str]
166
+ ) -> torch.Tensor:
167
+
168
+ if not self.use_hybrid_augmentation:
169
+ return embeddings
170
+
171
+ # Generate hybrid embeddings
172
+ hybrid_embeddings = self.hybrid_augmentation.generate_hybrid_embeddings(
173
+ embeddings, texts, self.tokenizer
174
+ )
175
+
176
+ return hybrid_embeddings
177
+
178
+ def _apply_attention_adjustment(
179
+ self,
180
+ query: torch.Tensor,
181
+ key: torch.Tensor,
182
+ value: torch.Tensor,
183
+ attention_mask: torch.Tensor,
184
+ texts: List[str]
185
+ ) -> torch.Tensor:
186
+ """Adjust attention scores"""
187
+ if not self.use_hybrid_augmentation:
188
+ # Standard attention calculation
189
+ attention_scores = torch.matmul(query, key.transpose(-1, -2))
190
+ attention_scores = attention_scores / math.sqrt(query.size(-1))
191
+
192
+ # Apply attention mask
193
+ if attention_mask is not None:
194
+ attention_scores = attention_scores + attention_mask
195
+
196
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
197
+ context_layer = torch.matmul(attention_probs, value)
198
+ return context_layer
199
+
200
+ # Generate psycholinguistic alignment matrix
201
+ H = self.hybrid_augmentation.generate_psycholinguistic_alignment_matrix(
202
+ texts, query.size(2), query.device
203
+ )
204
+
205
+ # Calculate attention scores
206
+ attention_scores = torch.matmul(query, key.transpose(-1, -2))
207
+ attention_scores = attention_scores / math.sqrt(query.size(-1))
208
+
209
+ # Add psycholinguistic alignment
210
+ gamma = self.hybrid_augmentation.gamma
211
+ attention_scores = attention_scores + gamma * H.unsqueeze(1) # Add dimension for multi-head attention
212
+
213
+ # Apply attention mask
214
+ if attention_mask is not None:
215
+ attention_scores = attention_scores + attention_mask
216
+
217
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
218
+ context_layer = torch.matmul(attention_probs, value)
219
+ return context_layer
220
+
221
+ def forward(
222
+ self,
223
+ batch_seqs: torch.Tensor,
224
+ batch_seq_masks: torch.Tensor,
225
+ batch_seq_segments: torch.Tensor,
226
+ labels: torch.Tensor,
227
+ texts: Optional[List[str]] = None
228
+ ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
229
+         # If hybrid noise augmentation is enabled but no texts are provided, fall back to the standard forward pass for this call only; a local flag avoids permanently disabling augmentation on the instance
230
+         use_hybrid = self.use_hybrid_augmentation and texts is not None
231
+         if self.use_hybrid_augmentation and texts is None:
232
+             logger.warning("Hybrid augmentation enabled but no texts provided. Using standard forward pass.")
233
+
234
+ # Standard BERT forward pass
235
+ outputs = self.bert(
236
+ input_ids=batch_seqs,
237
+ attention_mask=batch_seq_masks,
238
+ token_type_ids=batch_seq_segments,
239
+ labels=labels,
240
+             output_hidden_states=use_hybrid  # Need hidden states if using augmentation
241
+ )
242
+
243
+ loss = outputs.loss
244
+ logits = outputs.logits
245
+
246
+ # If hybrid noise augmentation is enabled, apply to hidden states
247
+         if use_hybrid and texts:
248
+ # Get the last layer hidden states
249
+ hidden_states = outputs.hidden_states[-1]
250
+
251
+ # Apply hybrid noise augmentation
252
+ augmented_hidden_states = self._apply_hybrid_augmentation(
253
+ hidden_states, batch_seq_masks, texts
254
+ )
255
+
256
+ # Recalculate classifier output using augmented hidden states
257
+ pooled_output = augmented_hidden_states[:, 0] # Use [CLS] token representation
258
+ logits = self.bert.classifier(pooled_output)
259
+
260
+ # Recalculate loss
261
+ if labels is not None:
262
+ loss_fct = nn.CrossEntropyLoss()
263
+ loss = loss_fct(logits.view(-1, self.bert.config.num_labels), labels.view(-1))
264
+
265
+ probabilities = nn.functional.softmax(logits, dim=-1)
266
+ return loss, logits, probabilities
267
+
268
+
269
+
270
+ class BertTrainer:
271
+ def __init__(self, config: TrainingConfig):
272
+ self.config = config
273
+ self.config.validate()
274
+ self.model = BertClassifier(
275
+ config.model_path,
276
+ config.num_labels,
277
+ use_hybrid_augmentation=config.use_hybrid_augmentation,
278
+ sigma=config.sigma,
279
+ alpha=config.alpha,
280
+ gamma=config.gamma
281
+ )
282
+ self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
283
+ self.model.to(self.device)
284
+
285
+ def _prepare_data(
286
+ self,
287
+ train_df: pd.DataFrame,
288
+ dev_df: pd.DataFrame,
289
+ test_df: pd.DataFrame
290
+ ) -> Tuple[DataLoader, DataLoader, DataLoader]:
291
+ train_data = DataPrecessForSentence(
292
+ self.model.tokenizer,
293
+ train_df,
294
+ max_seq_len=self.config.max_seq_len
295
+ )
296
+ train_loader = DataLoader(
297
+ train_data,
298
+ shuffle=True,
299
+ batch_size=self.config.batch_size
300
+ )
301
+
302
+ dev_data = DataPrecessForSentence(
303
+ self.model.tokenizer,
304
+ dev_df,
305
+ max_seq_len=self.config.max_seq_len
306
+ )
307
+ dev_loader = DataLoader(
308
+ dev_data,
309
+ shuffle=False,
310
+ batch_size=self.config.batch_size
311
+ )
312
+
313
+ test_data = DataPrecessForSentence(
314
+ self.model.tokenizer,
315
+ test_df,
316
+ max_seq_len=self.config.max_seq_len
317
+ )
318
+ test_loader = DataLoader(
319
+ test_data,
320
+ shuffle=False,
321
+ batch_size=self.config.batch_size
322
+ )
323
+
324
+ return train_loader, dev_loader, test_loader
325
+
326
+ def _prepare_optimizer(self, num_training_steps: int) -> Tuple[AdamW, Any]:
327
+ param_optimizer = list(self.model.named_parameters())
328
+ no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
329
+ optimizer_grouped_parameters = [
330
+ {
331
+ 'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
332
+ 'weight_decay': 0.01
333
+ },
334
+ {
335
+ 'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
336
+ 'weight_decay': 0.0
337
+ }
338
+ ]
339
+
340
+ optimizer = AdamW(
341
+ optimizer_grouped_parameters,
342
+ lr=self.config.learning_rate
343
+ )
344
+
345
+ scheduler = get_linear_schedule_with_warmup(
346
+ optimizer,
347
+ num_warmup_steps=int(num_training_steps * self.config.warmup_ratio),
348
+ num_training_steps=num_training_steps
349
+ )
350
+
351
+ return optimizer, scheduler
352
+
353
+ def _initialize_training_stats(self) -> Dict[str, List]:
354
+ return {
355
+ 'epochs_count': [],
356
+ 'train_losses': [],
357
+ 'train_accuracies': [],
358
+ 'valid_losses': [],
359
+ 'valid_accuracies': [],
360
+ 'valid_aucs': []
361
+ }
362
+
363
+ def _update_training_stats(
364
+ self,
365
+ training_stats: Dict[str, List],
366
+ epoch: int,
367
+ train_metrics: Dict[str, float],
368
+ val_metrics: Dict[str, float]
369
+ ) -> None:
370
+ training_stats['epochs_count'].append(epoch)
371
+ training_stats['train_losses'].append(train_metrics['loss'])
372
+ training_stats['train_accuracies'].append(train_metrics['accuracy'])
373
+ training_stats['valid_losses'].append(val_metrics['loss'])
374
+ training_stats['valid_accuracies'].append(val_metrics['accuracy'])
375
+ training_stats['valid_aucs'].append(val_metrics['auc'])
376
+
377
+ logger.info(
378
+ f"Training - Loss: {train_metrics['loss']:.4f}, "
379
+ f"Accuracy: {train_metrics['accuracy'] * 100:.2f}%"
380
+ )
381
+ logger.info(
382
+ f"Validation - Loss: {val_metrics['loss']:.4f}, "
383
+ f"Accuracy: {val_metrics['accuracy'] * 100:.2f}%, "
384
+ f"AUC: {val_metrics['auc']:.4f}"
385
+ )
386
+
387
+ def _save_checkpoint(
388
+ self,
389
+ target_dir: str,
390
+ epoch: int,
391
+ optimizer: AdamW,
392
+ best_score: float,
393
+ training_stats: Dict[str, List]
394
+ ) -> None:
395
+ checkpoint = {
396
+ "epoch": epoch,
397
+ "model": self.model.state_dict(),
398
+ "optimizer": optimizer.state_dict(),
399
+ "best_score": best_score,
400
+ **training_stats
401
+ }
402
+ torch.save(
403
+ checkpoint,
404
+ os.path.join(target_dir, "best.pth.tar")
405
+ )
406
+ logger.info("Model saved successfully")
407
+
408
+ def _load_checkpoint(
409
+ self,
410
+ checkpoint_path: str,
411
+ optimizer: AdamW,
412
+ training_stats: Dict[str, List]
413
+ ) -> float:
414
+ checkpoint = torch.load(checkpoint_path)
415
+ self.model.load_state_dict(checkpoint["model"])
416
+ optimizer.load_state_dict(checkpoint["optimizer"])
417
+ for key in training_stats:
418
+ training_stats[key] = checkpoint[key]
419
+ logger.info(f"Loaded checkpoint from epoch {checkpoint['epoch']}")
420
+ return checkpoint["best_score"]
421
+
422
+ def _train_epoch(
423
+ self,
424
+ train_loader: DataLoader,
425
+ optimizer: AdamW,
426
+ scheduler: Any
427
+ ) -> Dict[str, float]:
428
+ self.model.train()
429
+ total_loss = 0
430
+ correct_preds = 0
431
+
432
+ for batch in tqdm(train_loader, desc="Training"):
433
+ # Process batch containing texts
434
+ input_ids, attention_mask, token_type_ids, labels, texts = batch
435
+ input_ids = input_ids.to(self.device)
436
+ attention_mask = attention_mask.to(self.device)
437
+ token_type_ids = token_type_ids.to(self.device)
438
+ labels = labels.to(self.device)
439
+
440
+ optimizer.zero_grad()
441
+ loss, _, probabilities = self.model(
442
+ input_ids,
443
+ attention_mask,
444
+ token_type_ids,
445
+ labels,
446
+ texts # Pass original texts for noise augmentation
447
+ )
448
+
449
+ loss.backward()
450
+ nn.utils.clip_grad_norm_(self.model.parameters(), self.config.max_grad_norm)
451
+
452
+ optimizer.step()
453
+ scheduler.step()
454
+
455
+ total_loss += loss.item()
456
+ correct_preds += (probabilities.argmax(dim=1) == labels).sum().item()
457
+
458
+ return {
459
+ 'loss': total_loss / len(train_loader),
460
+ 'accuracy': correct_preds / len(train_loader.dataset)
461
+ }
462
+
463
+ def _validate_epoch(self, dev_loader: DataLoader) -> Tuple[Dict[str, float], List[float]]:
464
+ self.model.eval()
465
+ total_loss = 0
466
+ correct_preds = 0
467
+ all_probs = []
468
+ all_labels = []
469
+ all_preds = []
470
+
471
+ with torch.no_grad():
472
+ for batch in tqdm(dev_loader, desc="Validating"):
473
+
474
+ input_ids, attention_mask, token_type_ids, labels, texts = batch
475
+ input_ids = input_ids.to(self.device)
476
+ attention_mask = attention_mask.to(self.device)
477
+ token_type_ids = token_type_ids.to(self.device)
478
+ labels = labels.to(self.device)
479
+
480
+ loss, _, probabilities = self.model(
481
+ input_ids,
482
+ attention_mask,
483
+ token_type_ids,
484
+ labels,
485
+ texts
486
+ )
487
+
488
+ total_loss += loss.item()
489
+ predictions = probabilities.argmax(dim=1)
490
+ correct_preds += (predictions == labels).sum().item()
491
+ all_probs.extend(probabilities[:, 1].cpu().numpy())
492
+ all_labels.extend(labels.cpu().numpy())
493
+ all_preds.extend(predictions.cpu().numpy())
494
+
495
+ metrics = {
496
+ 'loss': total_loss / len(dev_loader),
497
+ 'accuracy': correct_preds / len(dev_loader.dataset),
498
+ 'auc': roc_auc_score(all_labels, all_probs),
499
+ 'f1': f1_score(all_labels, all_preds, average='weighted'),
500
+ 'precision': precision_score(all_labels, all_preds, average='weighted'),
501
+ 'recall': recall_score(all_labels, all_preds, average='weighted')
502
+ }
503
+
504
+ return metrics, all_probs
505
+
506
+ def _evaluate_test_set(
507
+ self,
508
+ test_loader: DataLoader,
509
+ target_dir: str,
510
+ epoch: int
511
+ ) -> Dict[str, float]:
512
+ test_metrics, all_probs = self._validate_epoch(test_loader)
513
+ logger.info(f"Test accuracy: {test_metrics['accuracy'] * 100:.2f}%")
514
+ logger.info(f"Test F1 score: {test_metrics['f1'] * 100:.2f}%")
515
+ logger.info(f"Test AUC: {test_metrics['auc']:.4f}")
516
+
517
+ test_prediction = pd.DataFrame({'prob_1': all_probs})
518
+ test_prediction['prob_0'] = 1 - test_prediction['prob_1']
519
+ test_prediction['prediction'] = test_prediction.apply(
520
+ lambda x: 0 if (x['prob_0'] > x['prob_1']) else 1,
521
+ axis=1
522
+ )
523
+
524
+ output_path = os.path.join(target_dir, f"test_prediction_epoch_{epoch}.csv")
525
+ test_prediction.to_csv(output_path, index=False)
526
+ logger.info(f"Test predictions saved to {output_path}")
527
+
528
+ if self.config.evaluate_adversarial:
529
+ self._evaluate_adversarial_robustness(test_loader, target_dir, epoch)
530
+
531
+ return test_metrics
532
+
533
+ def _evaluate_adversarial_robustness(
534
+ self,
535
+ test_loader: DataLoader,
536
+ target_dir: str,
537
+ epoch: int
538
+ ) -> None:
539
+ """Evaluate model robustness across different linguistic phenomena"""
540
+ logger.info("Evaluating adversarial robustness...")
541
+
542
+ linguistic_rules = LinguisticRules()
543
+
544
+ phenomenon_results = {
545
+ 'sarcasm': {'correct': 0, 'total': 0},
546
+ 'negation': {'correct': 0, 'total': 0},
547
+ 'polysemy': {'correct': 0, 'total': 0}
548
+ }
549
+
550
+ self.model.eval()
551
+ with torch.no_grad():
552
+ for batch in tqdm(test_loader, desc="Adversarial Evaluation"):
553
+ input_ids, attention_mask, token_type_ids, labels, texts = batch
554
+ input_ids = input_ids.to(self.device)
555
+ attention_mask = attention_mask.to(self.device)
556
+ token_type_ids = token_type_ids.to(self.device)
557
+ labels = labels.to(self.device)
558
+
559
+ # Get model predictions
560
+ _, _, probabilities = self.model(
561
+ input_ids, attention_mask, token_type_ids, labels, texts
562
+ )
563
+ predictions = probabilities.argmax(dim=1)
564
+
565
+ # Check linguistic phenomena for each sample
566
+ for i, text in enumerate(texts):
567
+ # Check for sarcasm
568
+ if linguistic_rules.detect_sarcasm(text):
569
+ phenomenon_results['sarcasm']['total'] += 1
570
+ if predictions[i] == labels[i]:
571
+ phenomenon_results['sarcasm']['correct'] += 1
572
+
573
+ # Check for negation
574
+ if linguistic_rules.detect_negation(text):
575
+ phenomenon_results['negation']['total'] += 1
576
+ if predictions[i] == labels[i]:
577
+ phenomenon_results['negation']['correct'] += 1
578
+
579
+ # Check for polysemy
580
+ if linguistic_rules.find_polysemy_words(text):
581
+ phenomenon_results['polysemy']['total'] += 1
582
+ if predictions[i] == labels[i]:
583
+ phenomenon_results['polysemy']['correct'] += 1
584
+
585
+ phenomenon_accuracy = {}
586
+ for phenomenon, results in phenomenon_results.items():
587
+ if results['total'] > 0:
588
+ accuracy = results['correct'] / results['total']
589
+ phenomenon_accuracy[phenomenon] = accuracy
590
+ logger.info(f"Accuracy on {phenomenon}: {accuracy * 100:.2f}% ({results['correct']}/{results['total']})")
591
+ else:
592
+ phenomenon_accuracy[phenomenon] = 0.0
593
+ logger.info(f"No samples found for {phenomenon}")
594
+
595
+ with open(os.path.join(target_dir, f"adversarial_results_epoch_{epoch}.json"), "w") as f:
596
+ json.dump(phenomenon_accuracy, f)
597
+
598
+ def train_and_evaluate(
599
+ self,
600
+ train_df: pd.DataFrame,
601
+ dev_df: pd.DataFrame,
602
+ test_df: pd.DataFrame,
603
+ target_dir: str,
604
+ checkpoint: Optional[str] = None
605
+ ) -> Dict[str, float]:
606
+ try:
607
+ os.makedirs(target_dir, exist_ok=True)
608
+
609
+ train_loader, dev_loader, test_loader = self._prepare_data(
610
+ train_df, dev_df, test_df
611
+ )
612
+
613
+ optimizer, scheduler = self._prepare_optimizer(
614
+ len(train_loader) * self.config.epochs
615
+ )
616
+
617
+ training_stats = self._initialize_training_stats()
618
+ best_score = 0.0
619
+ patience_counter = 0
620
+ best_test_metrics = None
621
+
622
+ if checkpoint:
623
+ best_score = self._load_checkpoint(checkpoint, optimizer, training_stats)
624
+
625
+ for epoch in range(1, self.config.epochs + 1):
626
+ logger.info(f"Training epoch {epoch}")
627
+
628
+ # Train
629
+ train_metrics = self._train_epoch(train_loader, optimizer, scheduler)
630
+
631
+ # Val
632
+ val_metrics, _ = self._validate_epoch(dev_loader)
633
+
634
+ self._update_training_stats(training_stats, epoch, train_metrics, val_metrics)
635
+
636
+ # Saving / Early stopping
637
+ if val_metrics['accuracy'] > best_score:
638
+ best_score = val_metrics['accuracy']
639
+ patience_counter = 0
640
+ if self.config.if_save_model:
641
+ self._save_checkpoint(
642
+ target_dir,
643
+ epoch,
644
+ optimizer,
645
+ best_score,
646
+ training_stats
647
+ )
648
+ best_test_metrics = self._evaluate_test_set(test_loader, target_dir, epoch)
649
+ else:
650
+ patience_counter += 1
651
+ if patience_counter >= self.config.patience:
652
+ logger.info("Early stopping triggered")
653
+ break
654
+
655
+ if best_test_metrics is None:
656
+ best_test_metrics = self._evaluate_test_set(test_loader, target_dir, epoch)
657
+
658
+ return best_test_metrics
659
+
660
+ except Exception as e:
661
+ logger.error(f"Training failed: {e}")
662
+ raise
663
+
664
+
665
+ def set_seed(seed: int = 42) -> None:
666
+ import random
667
+ random.seed(seed)
668
+ np.random.seed(seed)
669
+ torch.manual_seed(seed)
670
+ torch.cuda.manual_seed_all(seed)
671
+ torch.backends.cudnn.deterministic = True
672
+ torch.backends.cudnn.benchmark = False
673
+ os.environ['PYTHONHASHSEED'] = str(seed)
674
+
675
+
676
+ def main(args):
677
+ try:
678
+ config = TrainingConfig(out_dir=args.out_dir)
679
+ pathlib.Path(config.out_dir).mkdir(parents=True, exist_ok=True)
680
+
681
+ with open(os.path.join(config.out_dir, "config.json"), "w") as f:
682
+ config_dict = {k: v for k, v in config.__dict__.items()
683
+ if not k.startswith('_') and not callable(v)}
684
+ json.dump(config_dict, f, indent=2)
685
+
686
+ train_df = pd.read_csv(
687
+ os.path.join(args.data_path, "train.tsv"),
688
+ sep='\t',
689
+ header=None,
690
+ names=['similarity', 's1']
691
+ )
692
+ dev_df = pd.read_csv(
693
+ os.path.join(args.data_path, "dev.tsv"),
694
+ sep='\t',
695
+ header=None,
696
+ names=['similarity', 's1']
697
+ )
698
+ test_df = pd.read_csv(
699
+ os.path.join(args.data_path, "test.tsv"),
700
+ sep='\t',
701
+ header=None,
702
+ names=['similarity', 's1']
703
+ )
704
+
705
+ set_seed(2024)
706
+
707
+ logger.info(f"Starting training with hybrid augmentation: {config.use_hybrid_augmentation}")
708
+ if config.use_hybrid_augmentation:
709
+ logger.info(f"Augmentation parameters - sigma: {config.sigma}, alpha: {config.alpha}, gamma: {config.gamma}")
710
+
711
+ trainer = BertTrainer(config)
712
+ test_metrics = trainer.train_and_evaluate(train_df, dev_df, test_df, os.path.join(config.out_dir, "output"))
713
+
714
+ final_infos = {
715
+ "sentiment": {
716
+ "means": {
717
+ "best_acc": test_metrics['accuracy'],
718
+ "best_f1": test_metrics['f1'],
719
+ "best_auc": test_metrics['auc']
720
+ }
721
+ }
722
+ }
723
+
724
+ with open(os.path.join(config.out_dir, "final_info.json"), "w") as f:
725
+ json.dump(final_infos, f, indent=2)
726
+
727
+ logger.info(f"Training completed successfully. Results saved to {config.out_dir}")
728
+
729
+ except Exception as e:
730
+ logger.error(f"Program failed: {e}")
731
+ raise
732
+
733
+
734
+ if __name__ == "__main__":
735
+ parser = argparse.ArgumentParser()
736
+ parser.add_argument("--out_dir", type=str, default="./run_1")
737
+ parser.add_argument("--data_path", type=str, default="./datasets/SST-2/")
738
+ args = parser.parse_args()
739
+ try:
740
+ main(args)
741
+ except Exception as e:
742
+ print("Original error in subprocess:", flush=True)
743
+ traceback.print_exc(file=open(os.path.join(args.out_dir, "traceback.log"), "w"))
744
+ raise
examples/AutoClsSST_SST-2/Transformer-Hybrid-Augmentation-Sentiment/idea.json ADDED
@@ -0,0 +1,7 @@
1
+ {
2
+ "name": "Transformer-Hybrid-Augmentation-Sentiment",
3
+ "title": "Hybrid Noise Augmentation with Psycholinguistic and Linguistic Rule Integration for Adversarially Robust Sentiment Analysis",
4
+ "description": "This method refines and extends transformer-based sentiment analysis on the SST-2 dataset by introducing a mathematically formalized and algorithmically detailed hybrid noise augmentation approach. The refinement integrates psycholinguistically-grounded neural text generation with rule-based handling of sarcasm, negation, and polysemy through a unified framework. The approach uses adversarial benchmarks like TextFlint for robustness evaluation under noisy and low-resource conditions, promoting reproducibility and practical feasibility.",
5
+ "statement": "The proposed method advances the field of sentiment analysis by mathematically formalizing the integration of psycholinguistic features and linguistic rules into hybrid noise augmentation. Uniquely, it ties these augmentations directly to transformer-layer representations through a quantifiable and interpretable alignment framework. This approach bridges gaps between linguistic phenomena and deep learning architectures, notably improving adversarial robustness as evidenced by evaluations on curated datasets and adversarial benchmarks.",
6
+ "method": "### Hybrid Noise Augmentation and Integration with Transformer Layers\n\n1. **Mathematical Framework for Noise Augmentation**\n - The hybrid noise generation process combines two components:\n - **Psycholinguistic Neural Text Noise**: Modeled as a Gaussian perturbation applied to the embedding space of tokens, guided by psycholinguistic scores. Formally:\n \\[\n e' = e + \\mathcal{N}(0, \\sigma^2 \\cdot S) \\quad \\text{s.t.} \\quad S \\propto \\text{psycholinguistic importance (e.g., valence, arousal, dominance)}\n \\]\n Where \\(e\\) is the original token embedding, \\(\\sigma\\) is a scaling factor, and \\(S\\) indicates a psycholinguistic importance score.\n - **Linguistic Rule-Based Perturbation**: Encodes augmentations tied to sarcasm (e.g., exaggeration patterns), negation (e.g., flipping polarity), and polysemy (e.g., substituting ambiguous tokens). These operations are encoded as transformation matrices mapping token embeddings \\(e\\) to augmented forms \\(e''\\):\n \\[\n e'' = R_{\\text{rule}} \\cdot e\n \\]\n Where \\(R_{\\text{rule}}\\) represents rule-specific embedding transformations.\n - The final hybrid embedding \\(e_\\text{aug}\\) is computed as:\n \\[\n e_\\text{aug} = \\alpha e' + (1 - \\alpha)e'' \\quad \\text{with } \\alpha \\in [0, 1].\n \\]\n\n2. **Alignment with Transformer Representations**\n - To integrate augmented embeddings into transformer training, the hybrid embeddings are fused during forward passes in the multi-head attention mechanism. The attention scores \\(A\\) are revised to weight augmented signals:\n \\[\n A_{\\text{aug}} = \\text{softmax}\\left(\\frac{QK^\\top}{\\sqrt{d_k}} + \\gamma \\cdot H\\right),\n \\]\n Where \\(H\\) represents a psycholinguistic alignment matrix emphasizing linguistic phenomena relevance, \\(\\gamma\\) is a tunable hyperparameter, and \\(d_k\\) is the dimension of keys.\n\n3. **Algorithmic Workflow (Pseudocode)**\n ```\n Input: Training dataset (D), psycholinguistic features (P), linguistic rules (L), transformer hyperparameters\n Output: Trained sentiment model with robustness metrics\n\n Step 1: Preprocess D by computing psycholinguistic scores (S) for each token and applying rules (L) to generate augmentations.\n Step 2: For each batch in training pipeline:\n a. Generate hybrid embeddings using Eq. (3).\n b. Replace token embeddings in transformer layers with hybrid embeddings.\n c. Recompute multi-head attention scores using Eq. (4).\n Step 3: Fine-tune the model on augmentation-adjusted samples.\n Step 4: Evaluate on adversarial benchmarks (e.g., TextFlint) and record metrics (e.g., F1 score, robustness under noise).\n ```\n\n4. **Adversarial and Phenomena-Specific Validation**\n - Adversarial robustness is validated using TextFlint benchmarks, targeting linguistic phenomena like sarcasm, negation, and polysemy. Metrics include error rate breakdown by phenomena and overall performance stability under noise.\n\n5. **Parameter Initialization and Tuning**\n - \\(\\sigma\\), \\(S\\), \\(\\alpha\\), \\(\\gamma\\) are empirically tuned on validation data with cross-validation ensuring consistency with linguistic phenomena distributions.\n\nThis refined method addresses critiques of mathematical insufficiency, algorithmic clarity, and reproducibility while ensuring strong theoretical and practical contributions to sentiment analysis."
7
+ }
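
The `method` field above defines the hybrid embedding as e_aug = alpha * e' + (1 - alpha) * e'', with e' = e + N(0, sigma^2 * S) and e'' = R_rule * e. A minimal PyTorch sketch of just that equation (Eq. 3), where the importance scores `S` and rule matrix `R_rule` are random stand-ins for the psycholinguistic lexicon and rule transformations, purely for illustration:

```python
import torch

torch.manual_seed(0)
seq_len, dim = 6, 16
sigma, alpha = 0.1, 0.5  # match the defaults in TrainingConfig

e = torch.randn(seq_len, dim)                           # original token embeddings
S = torch.rand(seq_len, 1)                              # stand-in psycholinguistic importance per token
R_rule = torch.eye(dim) + 0.01 * torch.randn(dim, dim)  # stand-in rule transformation matrix

e_prime = e + torch.randn_like(e) * sigma * S.sqrt()    # Gaussian noise with variance sigma^2 * S
e_rule = e @ R_rule.T                                   # rule-based perturbation e'' = R_rule * e
e_aug = alpha * e_prime + (1 - alpha) * e_rule          # hybrid embedding, Eq. (3)
assert e_aug.shape == e.shape
```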
examples/AutoClsSST_SST-2/Transformer-Hybrid-Augmentation-Sentiment/launcher.sh ADDED
@@ -0,0 +1 @@
1
+ python experiment.py
examples/AutoClsSST_SST-2/Transformer-Hybrid-Augmentation-Sentiment/psycholinguistic_utils.py ADDED
@@ -0,0 +1,472 @@
1
+ import os
2
+ import numpy as np
3
+ import pandas as pd
4
+ import torch
5
+ from typing import Dict, List, Tuple, Union, Optional
6
+ import nltk
7
+ from nltk.corpus import wordnet as wn
8
+ from nltk.tokenize import word_tokenize
9
+ import re
10
+ import logging
11
+
12
+ # Configure logging
13
+ logging.basicConfig(level=logging.INFO)
14
+ logger = logging.getLogger(__name__)
15
+
16
+ # Ensure NLTK resources are available
17
+ def ensure_nltk_resources():
18
+ """Ensure necessary NLTK resources are downloaded"""
19
+     resources = {'punkt': 'tokenizers/punkt', 'wordnet': 'corpora/wordnet'}  # wordnet lives under corpora/, not tokenizers/
20
+     for resource, resource_path in resources.items():
21
+         try:
22
+             nltk.data.find(resource_path)
23
+ logger.info(f"NLTK resource {resource} already exists")
24
+ except LookupError:
25
+ try:
26
+ logger.info(f"Downloading NLTK resource {resource}")
27
+ nltk.download(resource, quiet=False)
28
+ logger.info(f"NLTK resource {resource} downloaded successfully")
29
+ except Exception as e:
30
+ logger.error(f"Failed to download NLTK resource {resource}: {str(e)}")
31
+
32
+ # Try to download punkt_tab resource
33
+ try:
34
+ nltk.data.find('tokenizers/punkt_tab')
35
+ except LookupError:
36
+ try:
37
+ logger.info("Downloading NLTK resource punkt_tab")
38
+ nltk.download('punkt_tab', quiet=False)
39
+ logger.info("NLTK resource punkt_tab downloaded successfully")
40
+ except Exception as e:
41
+ logger.warning(f"Failed to download NLTK resource punkt_tab: {str(e)}")
42
+ logger.info("Will use alternative tokenization method")
43
+
44
+ # Try to download resources when module is imported
45
+ ensure_nltk_resources()
46
+
57
+ # Simple tokenization function, not dependent on NLTK
58
+ def simple_tokenize(text):
59
+ """Simple tokenization function using regular expressions"""
60
+ if not isinstance(text, str):
61
+ return []
62
+ # Convert text to lowercase
63
+ text = text.lower()
64
+ # Use regular expressions for tokenization, preserving letters, numbers, and some basic punctuation
66
+ tokens = re.findall(r'\b\w+\b|[!?,.]', text)
67
+ return tokens
68
+
69
+ # Add more robust tokenization processing
70
+ def safe_tokenize(text):
71
+ """Safe tokenization function, uses simple tokenization method when NLTK tokenization fails"""
72
+ if not isinstance(text, str):
73
+ return []
74
+
75
+ # First try using NLTK's word_tokenize
76
+ punkt_available = True
77
+ try:
78
+ nltk.data.find('tokenizers/punkt')
79
+ except LookupError:
80
+ punkt_available = False
81
+
82
+ if punkt_available:
83
+ try:
84
+ return word_tokenize(text.lower())
85
+ except Exception as e:
86
+ logger.warning(f"NLTK tokenization failed: {str(e)}")
87
+
88
+ # If NLTK tokenization is not available or fails, use simple tokenization method
89
+ return simple_tokenize(text)
90
+
+ # Load psycholinguistic dictionary (simulated; real data should be used in actual applications)
+ class PsycholinguisticFeatures:
+     def __init__(self, lexicon_path: Optional[str] = None):
+         """
+         Initialize the psycholinguistic feature extractor
+
+         Args:
+             lexicon_path: Path to a psycholinguistic lexicon; simulated data is used if None
+         """
+         # If a lexicon file is provided, load it; otherwise create a simple simulated dictionary
+         if lexicon_path and os.path.exists(lexicon_path):
+             self.lexicon = pd.read_csv(lexicon_path)
+             self.word_to_scores = {
+                 row['word']: {
+                     'valence': row['valence'],
+                     'arousal': row['arousal'],
+                     'dominance': row['dominance']
+                 } for _, row in self.lexicon.iterrows()
+             }
+         else:
+             # Create a simulated dictionary
+             self.word_to_scores = {}
+             # Sentiment vocabulary
+             positive_words = ['good', 'great', 'excellent', 'happy', 'joy', 'love', 'nice', 'wonderful', 'amazing', 'fantastic']
+             negative_words = ['bad', 'terrible', 'awful', 'sad', 'hate', 'poor', 'horrible', 'disappointing', 'worst', 'negative']
+             neutral_words = ['the', 'a', 'is', 'are', 'was', 'were', 'be', 'been', 'being', 'and', 'or', 'but', 'if', 'while', 'when']
+
+             # Assign high values to positive words
+             for word in positive_words:
+                 self.word_to_scores[word] = {
+                     'valence': np.random.uniform(0.7, 0.9),
+                     'arousal': np.random.uniform(0.5, 0.8),
+                     'dominance': np.random.uniform(0.6, 0.9)
+                 }
+
+             # Assign low values to negative words
+             for word in negative_words:
+                 self.word_to_scores[word] = {
+                     'valence': np.random.uniform(0.1, 0.3),
+                     'arousal': np.random.uniform(0.5, 0.8),
+                     'dominance': np.random.uniform(0.1, 0.4)
+                 }
+
+             # Assign medium values to neutral words
+             for word in neutral_words:
+                 self.word_to_scores[word] = {
+                     'valence': np.random.uniform(0.4, 0.6),
+                     'arousal': np.random.uniform(0.3, 0.5),
+                     'dominance': np.random.uniform(0.4, 0.6)
+                 }
+
+     def get_token_scores(self, token: str) -> Dict[str, float]:
+         """Get psycholinguistic scores for a single token"""
+         token = token.lower()
+         if token in self.word_to_scores:
+             return self.word_to_scores[token]
+         else:
+             # Return medium values for unknown words
+             return {
+                 'valence': 0.5,
+                 'arousal': 0.5,
+                 'dominance': 0.5
+             }
+
+     def get_importance_score(self, token: str) -> float:
+         """Calculate the importance score for a token"""
+         scores = self.get_token_scores(token)
+         # The importance score is a weighted combination of valence, arousal, and dominance;
+         # valence gets the largest weight because it is most relevant to sentiment analysis
+         importance = 0.6 * abs(scores['valence'] - 0.5) + 0.2 * scores['arousal'] + 0.2 * scores['dominance']
+         return importance
+
+     def compute_scores_for_text(self, text: str) -> List[Dict[str, float]]:
+         """Calculate psycholinguistic scores for each token in the text"""
+         tokens = safe_tokenize(text)
+         return [self.get_token_scores(token) for token in tokens]
+
+     def compute_importance_for_text(self, text: str) -> List[float]:
+         """Calculate importance scores for each token in the text"""
+         tokens = safe_tokenize(text)
+         return [self.get_importance_score(token) for token in tokens]
+
+
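
A quick sketch of the extractor on the simulated lexicon (the VAD scores are drawn randomly at construction time, so exact values vary per run):

features = PsycholinguisticFeatures()
print(features.get_token_scores('great'))   # valence drawn from U(0.7, 0.9): clearly positive
print(features.get_token_scores('zzz'))     # unknown word: {'valence': 0.5, 'arousal': 0.5, 'dominance': 0.5}
print(features.compute_importance_for_text("a great movie"))
# 'great' typically scores highest: its valence sits far from the neutral 0.5
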
+ class LinguisticRules:
+     def __init__(self):
+         """Initialize the linguistic rules processor"""
+         # Regular expressions for sarcasm patterns
+         self.sarcasm_patterns = [
+             r'(so|really|very|totally) (great|nice|good|wonderful|fantastic)',
+             r'(yeah|sure|right),? (like|as if)',
+             r'(oh|ah),? (great|wonderful|fantastic|perfect)'
+         ]
+
+         # List of negation words
+         self.negation_words = [
+             'not', 'no', 'never', 'none', 'nobody', 'nothing', 'neither', 'nor', 'nowhere',
+             "don't", "doesn't", "didn't", "won't", "wouldn't", "couldn't", "shouldn't", "isn't", "aren't", "wasn't", "weren't"
+         ]
+
+         # Polysemous words and their possible substitutes
+         self.polysemy_words = {
+             'fine': ['good', 'acceptable', 'penalty', 'delicate'],
+             'right': ['correct', 'appropriate', 'conservative', 'direction'],
+             'like': ['enjoy', 'similar', 'such as', 'want'],
+             'mean': ['signify', 'unkind', 'average', 'intend'],
+             'kind': ['type', 'benevolent', 'sort', 'sympathetic'],
+             'fair': ['just', 'pale', 'average', 'exhibition'],
+             'light': ['illumination', 'lightweight', 'pale', 'ignite'],
+             'hard': ['difficult', 'solid', 'harsh', 'diligent'],
+             'sound': ['noise', 'healthy', 'logical', 'measure'],
+             'bright': ['intelligent', 'luminous', 'vivid', 'promising']
+         }
+
+     def detect_sarcasm(self, text: str) -> bool:
+         """Detect whether any sarcasm pattern occurs in the text"""
+         text = text.lower()
+         for pattern in self.sarcasm_patterns:
+             if re.search(pattern, text):
+                 return True
+         return False
+
+     def detect_negation(self, text: str) -> List[int]:
+         """Detect the positions of negation words in the text"""
+         tokens = safe_tokenize(text)
+         negation_positions = []
+         for i, token in enumerate(tokens):
+             if token in self.negation_words:
+                 negation_positions.append(i)
+         return negation_positions
+
+     def find_polysemy_words(self, text: str) -> Dict[int, List[str]]:
+         """Find polysemous words in the text and their possible substitutes"""
+         tokens = safe_tokenize(text)
+         polysemy_positions = {}
+         for i, token in enumerate(tokens):
+             if token in self.polysemy_words:
+                 polysemy_positions[i] = self.polysemy_words[token]
+         return polysemy_positions
+
+     def get_wordnet_synonyms(self, word: str) -> List[str]:
+         """Get synonyms from WordNet"""
+         synonyms = []
+         for syn in wn.synsets(word):
+             for lemma in syn.lemmas():
+                 synonyms.append(lemma.name())
+         return list(set(synonyms))
+
+     def apply_rule_transformations(self, token_embeddings: torch.Tensor, text: str, tokenizer) -> torch.Tensor:
+         """
+         Apply rule-based transformations to token embeddings
+
+         Args:
+             token_embeddings: Original token embeddings [batch_size, seq_len, hidden_dim]
+             text: Original text
+             tokenizer: Tokenizer
+
+         Returns:
+             Transformed token embeddings
+         """
+         # Clone the original embeddings
+         transformed_embeddings = token_embeddings.clone()
+
+         try:
+             # Detect sarcasm
+             if self.detect_sarcasm(text):
+                 # For sarcasm, reverse sentiment-related embedding dimensions.
+                 # This is a simplified implementation; real applications may need more elaborate transformations
+                 sentiment_dims = torch.randperm(token_embeddings.shape[-1])[:token_embeddings.shape[-1]//10]
+                 transformed_embeddings[:, :, sentiment_dims] = -transformed_embeddings[:, :, sentiment_dims]
+
+             # Handle negation
+             negation_positions = self.detect_negation(text)
+             if negation_positions:
+                 # For words following negation words, reverse their sentiment-related embedding dimensions
+                 try:
+                     tokens = tokenizer.tokenize(text)
+                 except Exception as e:
+                     logger.warning(f"Tokenization failed: {str(e)}, using alternative tokenization")
+                     tokens = safe_tokenize(text)
+
+                 for pos in negation_positions:
+                     if pos + 1 < len(tokens):  # Ensure there is a word after the negation
+                         # Find the position of the token after the negation in the embeddings.
+                         # Simplified handling; real applications should account for tokenization differences
+                         sentiment_dims = torch.randperm(token_embeddings.shape[-1])[:token_embeddings.shape[-1]//10]
+                         if pos + 1 < token_embeddings.shape[1]:  # Ensure we do not exceed the sequence length
+                             transformed_embeddings[:, pos+1, sentiment_dims] = -transformed_embeddings[:, pos+1, sentiment_dims]
+
+             # Handle polysemy
+             polysemy_positions = self.find_polysemy_words(text)
+             if polysemy_positions:
+                 # For polysemous words, add some noise to simulate semantic ambiguity
+                 for pos in polysemy_positions:
+                     if pos < token_embeddings.shape[1]:  # Ensure we do not exceed the sequence length
+                         noise = torch.randn_like(transformed_embeddings[:, pos, :]) * 0.1
+                         transformed_embeddings[:, pos, :] += noise
+         except Exception as e:
+             logger.error(f"Error applying rule transformations: {str(e)}")
+             # The original (cloned) embeddings are returned in case of error
+
+         return transformed_embeddings
+
+
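
The detectors are keyword- and regex-driven, so they are easy to sanity-check in isolation:

rules = LinguisticRules()
print(rules.detect_sarcasm("oh great, another sequel"))  # True (third sarcasm pattern)
print(rules.detect_negation("this is not good"))         # [2] ('not' is the token at index 2)
print(rules.find_polysemy_words("a fine performance"))   # {1: ['good', 'acceptable', 'penalty', 'delicate']}
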
+ class HybridNoiseAugmentation:
+     def __init__(
+         self,
+         sigma: float = 0.1,
+         alpha: float = 0.5,
+         gamma: float = 0.1,
+         psycholinguistic_features: Optional[PsycholinguisticFeatures] = None,
+         linguistic_rules: Optional[LinguisticRules] = None
+     ):
+         """
+         Initialize hybrid noise augmentation
+
+         Args:
+             sigma: Scaling factor for Gaussian noise
+             alpha: Mixing weight parameter
+             gamma: Adjustment parameter in the attention mechanism
+             psycholinguistic_features: Psycholinguistic feature extractor
+             linguistic_rules: Linguistic rules processor
+         """
+         self.sigma = sigma
+         self.alpha = alpha
+         self.gamma = gamma
+         self.psycholinguistic_features = psycholinguistic_features or PsycholinguisticFeatures()
+         self.linguistic_rules = linguistic_rules or LinguisticRules()
+
+     def apply_psycholinguistic_noise(
+         self,
+         token_embeddings: torch.Tensor,
+         texts: List[str],
+         tokenizer
+     ) -> torch.Tensor:
+         """
+         Apply psycholinguistic noise
+
+         Args:
+             token_embeddings: Original token embeddings [batch_size, seq_len, hidden_dim]
+             texts: List of original texts
+             tokenizer: Tokenizer
+
+         Returns:
+             Token embeddings with noise applied
+         """
+         batch_size, seq_len, hidden_dim = token_embeddings.shape
+         noised_embeddings = token_embeddings.clone()
+
+         for i, text in enumerate(texts):
+             try:
+                 # Calculate importance scores for each token
+                 importance_scores = self.psycholinguistic_features.compute_importance_for_text(text)
+
+                 # Tokenize the text to match the model's tokenization
+                 try:
+                     model_tokens = tokenizer.tokenize(text)
+                 except Exception as e:
+                     logger.warning(f"Model tokenization failed: {str(e)}, using alternative tokenization")
+                     model_tokens = safe_tokenize(text)
+
+                 # Assign an importance score to each token (simplified handling)
+                 token_scores = torch.ones(seq_len, device=token_embeddings.device) * 0.5
+                 for j, token in enumerate(model_tokens[:seq_len-2]):  # Exclude [CLS] and [SEP]
+                     if j < len(importance_scores):
+                         token_scores[j+1] = importance_scores[j]  # +1 accounts for the [CLS] token
+
+                 # Scale the noise according to the importance scores
+                 noise = torch.randn_like(token_embeddings[i]) * self.sigma
+                 scaled_noise = noise * token_scores.unsqueeze(1)
+
+                 # Apply the noise
+                 noised_embeddings[i] = token_embeddings[i] + scaled_noise
+             except Exception as e:
+                 logger.error(f"Error processing text {i}: {str(e)}")
+                 # Use the original embeddings in case of error
+                 continue
+
+         return noised_embeddings
+
+     def apply_rule_based_perturbation(
+         self,
+         token_embeddings: torch.Tensor,
+         texts: List[str],
+         tokenizer
+     ) -> torch.Tensor:
+         """
+         Apply rule-based perturbation
+
+         Args:
+             token_embeddings: Original token embeddings [batch_size, seq_len, hidden_dim]
+             texts: List of original texts
+             tokenizer: Tokenizer
+
+         Returns:
+             Token embeddings with perturbation applied
+         """
+         batch_size = token_embeddings.shape[0]
+         perturbed_embeddings = token_embeddings.clone()
+
+         for i, text in enumerate(texts):
+             try:
+                 # Apply rule transformations
+                 perturbed_embeddings[i:i+1] = self.linguistic_rules.apply_rule_transformations(
+                     token_embeddings[i:i+1], text, tokenizer
+                 )
+             except Exception as e:
+                 logger.error(f"Error applying rule transformations to text {i}: {str(e)}")
+                 # Keep the original embeddings in case of error
+                 continue
+
+         return perturbed_embeddings
+
+     def generate_hybrid_embeddings(
+         self,
+         token_embeddings: torch.Tensor,
+         texts: List[str],
+         tokenizer
+     ) -> torch.Tensor:
+         """
+         Generate hybrid embeddings
+
+         Args:
+             token_embeddings: Original token embeddings [batch_size, seq_len, hidden_dim]
+             texts: List of original texts
+             tokenizer: Tokenizer
+
+         Returns:
+             Hybrid embeddings
+         """
+         # Apply psycholinguistic noise
+         psycholinguistic_embeddings = self.apply_psycholinguistic_noise(token_embeddings, texts, tokenizer)
+
+         # Apply rule-based perturbation
+         rule_based_embeddings = self.apply_rule_based_perturbation(token_embeddings, texts, tokenizer)
+
+         # Mix the two types of embeddings
+         hybrid_embeddings = (
+             self.alpha * psycholinguistic_embeddings +
+             (1 - self.alpha) * rule_based_embeddings
+         )
+
+         return hybrid_embeddings
+
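
A shape-level smoke test of the hybrid mix. This is only a sketch: torch is assumed to be imported at module level, and the tokenizer argument just needs a .tokenize(text) method here, so a whitespace stub suffices:

import torch

class StubTokenizer:
    def tokenize(self, text):
        return text.lower().split()

aug = HybridNoiseAugmentation(sigma=0.1, alpha=0.5)
emb = torch.randn(2, 8, 16)                      # [batch_size, seq_len, hidden_dim]
texts = ["not a good movie", "so great, as if"]  # triggers the negation and sarcasm rules
mixed = aug.generate_hybrid_embeddings(emb, texts, StubTokenizer())
assert mixed.shape == emb.shape                  # the alpha-weighted sum preserves shape
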
+     def generate_psycholinguistic_alignment_matrix(
+         self,
+         texts: List[str],
+         seq_len: int,
+         device: torch.device
+     ) -> torch.Tensor:
+         """
+         Generate the psycholinguistic alignment matrix
+
+         Args:
+             texts: List of original texts
+             seq_len: Sequence length
+             device: Computation device
+
+         Returns:
+             Psycholinguistic alignment matrix [batch_size, seq_len, seq_len]
+         """
+         batch_size = len(texts)
+         H = torch.zeros((batch_size, seq_len, seq_len), device=device)
+
+         for i, text in enumerate(texts):
+             try:
+                 # Calculate importance scores for each token
+                 importance_scores = self.psycholinguistic_features.compute_importance_for_text(text)
+
+                 # Pad (or truncate) to the sequence length
+                 padded_scores = importance_scores + [0.5] * (seq_len - len(importance_scores))
+                 padded_scores = padded_scores[:seq_len]
+
+                 # Create the alignment matrix
+                 scores_tensor = torch.tensor(padded_scores, device=device)
+                 # Use the outer product to build the matrix, emphasizing relationships between important tokens
+                 H[i] = torch.outer(scores_tensor, scores_tensor)
+             except Exception as e:
+                 logger.error(f"Error generating alignment matrix for text {i}: {str(e)}")
+                 # Use default values in case of error
+                 H[i] = torch.eye(seq_len, device=device) * 0.5
+
+         return H
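
How H is consumed is up to the training loop; one plausible reading of gamma (the "adjustment parameter in the attention mechanism" from __init__) is an additive bias on the attention logits. A sketch under that assumption:

import torch

aug = HybridNoiseAugmentation(gamma=0.1)
H = aug.generate_psycholinguistic_alignment_matrix(
    ["a truly wonderful film"], seq_len=6, device=torch.device("cpu"))
print(H.shape)                                        # torch.Size([1, 6, 6])

logits = torch.randn(1, 6, 6)                         # raw attention scores
attn = torch.softmax(logits + aug.gamma * H, dim=-1)  # bias attention toward important token pairs
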
examples/AutoClsSST_SST-2/Transformer-Hybrid-Augmentation-Sentiment/res/config.json ADDED
@@ -0,0 +1,23 @@
+ {
+     "max_seq_len": 50,
+     "epochs": 3,
+     "batch_size": 32,
+     "learning_rate": 2e-05,
+     "patience": 1,
+     "max_grad_norm": 10.0,
+     "warmup_ratio": 0.1,
+     "model_path": "/fs-computility/MA4Tool/shared/MA4Tool/hug_ckpts/BERT_ckpt",
+     "num_labels": 2,
+     "if_save_model": true,
+     "out_dir": "run_1",
+     "use_hybrid_augmentation": true,
+     "sigma": 0.1,
+     "alpha": 0.5,
+     "gamma": 0.1,
+     "evaluate_adversarial": true,
+     "adversarial_types": [
+         "sarcasm",
+         "negation",
+         "polysemy"
+     ]
+ }
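
For reproducing a run, the saved configuration maps directly onto the augmentation hyperparameters. A minimal loading sketch (assuming the path relative to the repository root and that HybridNoiseAugmentation comes from psycholinguistic_utils):

import json
from psycholinguistic_utils import HybridNoiseAugmentation

with open("examples/AutoClsSST_SST-2/Transformer-Hybrid-Augmentation-Sentiment/res/config.json") as f:
    cfg = json.load(f)

aug = HybridNoiseAugmentation(sigma=cfg["sigma"], alpha=cfg["alpha"], gamma=cfg["gamma"])
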
examples/AutoClsSST_SST-2/Transformer-Hybrid-Augmentation-Sentiment/res/final_info.json ADDED
@@ -0,0 +1,9 @@
+ {
+     "sentiment": {
+         "means": {
+             "best_acc": 0.9346512904997254,
+             "best_f1": 0.934620573857732,
+             "best_auc": 0.9836853202864146
+         }
+     }
+ }
examples/AutoClsSST_SST-2/Transformer-Hybrid-Augmentation-Sentiment/res/output/adversarial_results_epoch_1.json ADDED
@@ -0,0 +1 @@
+ {"sarcasm": 0.5, "negation": 0.8833333333333333, "polysemy": 0.875}
examples/AutoClsSST_SST-2/Transformer-Hybrid-Augmentation-Sentiment/res/output/adversarial_results_epoch_2.json ADDED
@@ -0,0 +1 @@
+ {"sarcasm": 0.5, "negation": 0.9291666666666667, "polysemy": 0.8854166666666666}
examples/AutoClsSST_SST-2/Transformer-Hybrid-Augmentation-Sentiment/res/output/adversarial_results_epoch_3.json ADDED
@@ -0,0 +1 @@
+ {"sarcasm": 0.5, "negation": 0.9333333333333333, "polysemy": 0.890625}
examples/AutoClsSST_SST-2/Transformer-Hybrid-Augmentation-Sentiment/res/output/best.pth.tar ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:67afe905b8fd06ae38035e639b627a1e6a9452861ec10a6913862848d465388f
+ size 1309283935
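
What is versioned here is only the Git LFS pointer; the actual checkpoint (about 1.3 GB, per the size field) must be fetched with git lfs pull before it can be deserialized, e.g.:

import torch

state = torch.load("res/output/best.pth.tar", map_location="cpu")  # requires the real LFS object, not the pointer
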
examples/AutoClsSST_SST-2/Transformer-Hybrid-Augmentation-Sentiment/res/output/test_prediction_epoch_1.csv ADDED
@@ -0,0 +1,1822 @@
1
+ prob_1,prob_0,prediction
2
+ 0.017987346,0.9820126,0
3
+ 0.042204365,0.9577956,0
4
+ 0.005619384,0.9943806,0
5
+ 0.11165446,0.88834554,0
6
+ 0.9990607,0.0009393096,1
7
+ 0.9985576,0.0014423728,1
8
+ 0.09149068,0.9085093,0
9
+ 0.99038213,0.009617865,1
10
+ 0.061220925,0.93877906,0
11
+ 0.011999225,0.98800075,0
12
+ 0.27228156,0.7277185,0
13
+ 0.008975787,0.9910242,0
14
+ 0.4299652,0.5700348,0
15
+ 0.9629334,0.03706658,1
16
+ 0.0071097794,0.99289024,0
17
+ 0.9787445,0.021255493,1
18
+ 0.334868,0.665132,0
19
+ 0.014855476,0.9851445,0
20
+ 0.027147604,0.9728524,0
21
+ 0.18510099,0.814899,0
22
+ 0.99310476,0.006895244,1
23
+ 0.036302544,0.96369743,0
24
+ 0.79037297,0.20962703,1
25
+ 0.9979961,0.0020039082,1
26
+ 0.04303489,0.9569651,0
27
+ 0.010619129,0.9893809,0
28
+ 0.011770026,0.98823,0
29
+ 0.99478996,0.005210042,1
30
+ 0.016992524,0.9830075,0
31
+ 0.9948708,0.005129218,1
32
+ 0.9840884,0.01591158,1
33
+ 0.013054576,0.98694545,0
34
+ 0.9990336,0.0009664297,1
35
+ 0.9824228,0.017577171,1
36
+ 0.9991371,0.00086289644,1
37
+ 0.8966288,0.1033712,1
38
+ 0.9925351,0.0074648857,1
39
+ 0.9426959,0.057304084,1
40
+ 0.06966817,0.9303318,0
41
+ 0.02884251,0.9711575,0
42
+ 0.99894696,0.0010530353,1
43
+ 0.9879887,0.01201129,1
44
+ 0.0114549715,0.988545,0
45
+ 0.045888722,0.9541113,0
46
+ 0.005285148,0.99471486,0
47
+ 0.99889743,0.0011025667,1
48
+ 0.992642,0.0073580146,1
49
+ 0.8923526,0.10764742,1
50
+ 0.0046849255,0.9953151,0
51
+ 0.08761977,0.9123802,0
52
+ 0.0055984557,0.9944016,0
53
+ 0.99783057,0.0021694303,1
54
+ 0.9863326,0.013667405,1
55
+ 0.0030051973,0.9969948,0
56
+ 0.010365627,0.9896344,0
57
+ 0.99762577,0.0023742318,1
58
+ 0.035337064,0.9646629,0
59
+ 0.5066794,0.49332058,1
60
+ 0.09923833,0.90076166,0
61
+ 0.22973226,0.7702677,0
62
+ 0.9990382,0.00096178055,1
63
+ 0.032096967,0.967903,0
64
+ 0.04023811,0.9597619,0
65
+ 0.24629366,0.75370634,0
66
+ 0.9967726,0.0032274127,1
67
+ 0.5677537,0.43224633,1
68
+ 0.99842656,0.0015734434,1
69
+ 0.0048263585,0.99517363,0
70
+ 0.008843221,0.99115676,0
71
+ 0.12863255,0.87136745,0
72
+ 0.9976199,0.002380073,1
73
+ 0.04623503,0.953765,0
74
+ 0.030449219,0.9695508,0
75
+ 0.9942368,0.005763173,1
76
+ 0.9837632,0.016236782,1
77
+ 0.971387,0.028612971,1
78
+ 0.99683505,0.003164947,1
79
+ 0.5374164,0.4625836,1
80
+ 0.3822342,0.6177658,0
81
+ 0.00779091,0.9922091,0
82
+ 0.040041454,0.95995855,0
83
+ 0.021378562,0.9786214,0
84
+ 0.007720521,0.99227947,0
85
+ 0.004920162,0.9950798,0
86
+ 0.24052013,0.7594799,0
87
+ 0.88527,0.11473,1
88
+ 0.23186211,0.7681379,0
89
+ 0.89529455,0.10470545,1
90
+ 0.004739047,0.99526095,0
91
+ 0.01277206,0.9872279,0
92
+ 0.98643076,0.013569236,1
93
+ 0.9984895,0.0015105009,1
94
+ 0.9828911,0.017108917,1
95
+ 0.27236646,0.72763354,0
96
+ 0.793148,0.20685202,1
97
+ 0.9947455,0.005254507,1
98
+ 0.13926674,0.8607333,0
99
+ 0.01058491,0.9894151,0
100
+ 0.0038890217,0.996111,0
101
+ 0.79691553,0.20308447,1
102
+ 0.9986444,0.0013555884,1
103
+ 0.9979442,0.0020558238,1
104
+ 0.044441495,0.9555585,0
105
+ 0.88036644,0.119633555,1
106
+ 0.05361689,0.9463831,0
107
+ 0.069073334,0.9309267,0
108
+ 0.9851537,0.014846325,1
109
+ 0.9671583,0.032841682,1
110
+ 0.99958795,0.0004120469,1
111
+ 0.07798401,0.92201596,0
112
+ 0.0151429605,0.984857,0
113
+ 0.027767643,0.97223234,0
114
+ 0.50991946,0.49008054,1
115
+ 0.04143904,0.95856094,0
116
+ 0.944954,0.055046022,1
117
+ 0.13595119,0.86404884,0
118
+ 0.004967409,0.9950326,0
119
+ 0.39969513,0.60030484,0
120
+ 0.1258757,0.8741243,0
121
+ 0.999556,0.000443995,1
122
+ 0.9614389,0.038561106,1
123
+ 0.5401162,0.4598838,1
124
+ 0.98386616,0.016133845,1
125
+ 0.9994962,0.00050377846,1
126
+ 0.9833968,0.016603172,1
127
+ 0.0822222,0.9177778,0
128
+ 0.09499955,0.90500045,0
129
+ 0.42408872,0.5759113,0
130
+ 0.026542522,0.97345746,0
131
+ 0.9804621,0.019537926,1
132
+ 0.009204455,0.99079555,0
133
+ 0.6974513,0.3025487,1
134
+ 0.032219443,0.96778053,0
135
+ 0.0053759557,0.994624,0
136
+ 0.79667634,0.20332366,1
137
+ 0.017117947,0.982882,0
138
+ 0.3332854,0.6667146,0
139
+ 0.06325321,0.9367468,0
140
+ 0.9806444,0.019355595,1
141
+ 0.08949667,0.9105033,0
142
+ 0.9982358,0.0017641783,1
143
+ 0.23832552,0.76167446,0
144
+ 0.37258604,0.627414,0
145
+ 0.061296813,0.9387032,0
146
+ 0.69546574,0.30453426,1
147
+ 0.010370918,0.9896291,0
148
+ 0.98728067,0.012719333,1
149
+ 0.008952184,0.9910478,0
150
+ 0.99470633,0.0052936673,1
151
+ 0.03351435,0.9664856,0
152
+ 0.01411938,0.9858806,0
153
+ 0.023474963,0.976525,0
154
+ 0.045357186,0.95464283,0
155
+ 0.9864639,0.013536096,1
156
+ 0.010048469,0.98995155,0
157
+ 0.011650249,0.98834974,0
158
+ 0.9945498,0.005450189,1
159
+ 0.997837,0.002162993,1
160
+ 0.073611826,0.92638814,0
161
+ 0.99919385,0.0008061528,1
162
+ 0.008553626,0.9914464,0
163
+ 0.87333703,0.12666297,1
164
+ 0.9996351,0.00036489964,1
165
+ 0.9895453,0.010454714,1
166
+ 0.9983864,0.001613617,1
167
+ 0.9648008,0.035199225,1
168
+ 0.60617673,0.39382327,1
169
+ 0.9253185,0.07468152,1
170
+ 0.9993642,0.00063580275,1
171
+ 0.011158958,0.98884106,0
172
+ 0.02874459,0.9712554,0
173
+ 0.9985784,0.0014215708,1
174
+ 0.031271964,0.96872807,0
175
+ 0.04002577,0.9599742,0
176
+ 0.9972126,0.0027874112,1
177
+ 0.99383813,0.0061618686,1
178
+ 0.9614754,0.038524628,1
179
+ 0.99583364,0.0041663647,1
180
+ 0.9969478,0.003052175,1
181
+ 0.010668896,0.9893311,0
182
+ 0.009893158,0.9901068,0
183
+ 0.9844791,0.015520871,1
184
+ 0.9994199,0.0005800724,1
185
+ 0.03167376,0.9683262,0
186
+ 0.4701557,0.5298443,0
187
+ 0.99754936,0.002450645,1
188
+ 0.0043209693,0.995679,0
189
+ 0.96475405,0.035245955,1
190
+ 0.06419759,0.9358024,0
191
+ 0.92708415,0.07291585,1
192
+ 0.02166707,0.97833294,0
193
+ 0.13145709,0.8685429,0
194
+ 0.98438317,0.015616834,1
195
+ 0.9787667,0.02123332,1
196
+ 0.022242839,0.97775716,0
197
+ 0.9927382,0.0072618127,1
198
+ 0.99876547,0.0012345314,1
199
+ 0.009305185,0.9906948,0
200
+ 0.9990983,0.00090169907,1
201
+ 0.047434792,0.9525652,0
202
+ 0.99762017,0.0023798347,1
203
+ 0.0119836945,0.9880163,0
204
+ 0.00772583,0.99227417,0
205
+ 0.018312778,0.98168725,0
206
+ 0.9981,0.0019000173,1
207
+ 0.055732295,0.9442677,0
208
+ 0.57043123,0.42956877,1
209
+ 0.08081629,0.91918373,0
210
+ 0.5944859,0.40551412,1
211
+ 0.9900677,0.00993228,1
212
+ 0.9982674,0.0017325878,1
213
+ 0.98261136,0.017388642,1
214
+ 0.027647449,0.97235256,0
215
+ 0.9643887,0.03561127,1
216
+ 0.007830231,0.9921698,0
217
+ 0.012874723,0.9871253,0
218
+ 0.004971323,0.9950287,0
219
+ 0.99645185,0.0035481453,1
220
+ 0.007631885,0.9923681,0
221
+ 0.05523793,0.94476205,0
222
+ 0.021507613,0.9784924,0
223
+ 0.56656116,0.43343884,1
224
+ 0.05502834,0.9449717,0
225
+ 0.9326318,0.06736821,1
226
+ 0.9989182,0.0010818243,1
227
+ 0.9938803,0.006119728,1
228
+ 0.9995615,0.00043851137,1
229
+ 0.99590474,0.0040952563,1
230
+ 0.54554003,0.45445997,1
231
+ 0.005170423,0.9948296,0
232
+ 0.0044530723,0.99554694,0
233
+ 0.009713774,0.99028623,0
234
+ 0.9992995,0.0007004738,1
235
+ 0.98156965,0.018430352,1
236
+ 0.99961734,0.00038266182,1
237
+ 0.98606235,0.013937652,1
238
+ 0.0060764276,0.99392354,0
239
+ 0.9987924,0.0012075901,1
240
+ 0.96624213,0.033757865,1
241
+ 0.96980697,0.03019303,1
242
+ 0.9986945,0.0013055205,1
243
+ 0.07295518,0.9270448,0
244
+ 0.9995516,0.00044840574,1
245
+ 0.9258207,0.07417929,1
246
+ 0.9946548,0.0053452253,1
247
+ 0.31419918,0.6858008,0
248
+ 0.9994393,0.0005607009,1
249
+ 0.9782752,0.02172482,1
250
+ 0.006705578,0.9932944,0
251
+ 0.96855205,0.031447947,1
252
+ 0.9297427,0.070257306,1
253
+ 0.87682605,0.12317395,1
254
+ 0.99842715,0.0015728474,1
255
+ 0.037452232,0.9625478,0
256
+ 0.012539358,0.9874606,0
257
+ 0.9984841,0.0015159249,1
258
+ 0.0035404707,0.99645954,0
259
+ 0.99661934,0.0033806562,1
260
+ 0.12860009,0.8713999,0
261
+ 0.99860126,0.0013987422,1
262
+ 0.057501275,0.94249874,0
263
+ 0.99541193,0.0045880675,1
264
+ 0.009283306,0.9907167,0
265
+ 0.010831974,0.98916805,0
266
+ 0.99911934,0.0008806586,1
267
+ 0.99079025,0.009209752,1
268
+ 0.011105877,0.9888941,0
269
+ 0.9981325,0.0018674731,1
270
+ 0.99856466,0.0014353395,1
271
+ 0.9887949,0.011205077,1
272
+ 0.08465004,0.91534996,0
273
+ 0.025346467,0.97465354,0
274
+ 0.015314564,0.9846854,0
275
+ 0.9965281,0.003471911,1
276
+ 0.99497604,0.0050239563,1
277
+ 0.19253147,0.80746853,0
278
+ 0.04702908,0.9529709,0
279
+ 0.010440662,0.98955935,0
280
+ 0.9973041,0.002695918,1
281
+ 0.9846629,0.01533711,1
282
+ 0.9791108,0.020889223,1
283
+ 0.018119644,0.98188037,0
284
+ 0.9981969,0.0018031001,1
285
+ 0.015249709,0.9847503,0
286
+ 0.17621323,0.82378674,0
287
+ 0.95717597,0.04282403,1
288
+ 0.9933883,0.006611705,1
289
+ 0.999546,0.00045400858,1
290
+ 0.99009913,0.009900868,1
291
+ 0.0097715035,0.9902285,0
292
+ 0.017324992,0.982675,0
293
+ 0.9924763,0.0075237155,1
294
+ 0.008047933,0.99195206,0
295
+ 0.019001774,0.9809982,0
296
+ 0.014944721,0.98505527,0
297
+ 0.01756266,0.9824373,0
298
+ 0.9991417,0.0008583069,1
299
+ 0.9978131,0.0021868944,1
300
+ 0.98394644,0.016053557,1
301
+ 0.01241375,0.98758626,0
302
+ 0.99510217,0.004897833,1
303
+ 0.91950506,0.08049494,1
304
+ 0.102131665,0.89786834,0
305
+ 0.99925786,0.00074213743,1
306
+ 0.037711497,0.9622885,0
307
+ 0.101122774,0.8988772,0
308
+ 0.9947003,0.0052996874,1
309
+ 0.091061436,0.9089386,0
310
+ 0.75730544,0.24269456,1
311
+ 0.9954313,0.004568696,1
312
+ 0.014086184,0.9859138,0
313
+ 0.99539524,0.004604757,1
314
+ 0.018453714,0.9815463,0
315
+ 0.99829966,0.0017003417,1
316
+ 0.96803766,0.031962335,1
317
+ 0.050185136,0.94981486,0
318
+ 0.38001558,0.6199844,0
319
+ 0.9892384,0.010761619,1
320
+ 0.99823475,0.0017652512,1
321
+ 0.98553604,0.014463961,1
322
+ 0.96396804,0.03603196,1
323
+ 0.995589,0.004410982,1
324
+ 0.010450972,0.98954904,0
325
+ 0.03283675,0.96716326,0
326
+ 0.004416458,0.99558353,0
327
+ 0.004269006,0.995731,0
328
+ 0.008033765,0.99196625,0
329
+ 0.96800274,0.031997263,1
330
+ 0.037889402,0.9621106,0
331
+ 0.99840814,0.0015918612,1
332
+ 0.038101707,0.96189827,0
333
+ 0.9668745,0.03312552,1
334
+ 0.08468464,0.9153154,0
335
+ 0.9986553,0.0013446808,1
336
+ 0.95869124,0.04130876,1
337
+ 0.9897049,0.010295093,1
338
+ 0.005225606,0.9947744,0
339
+ 0.9976922,0.0023077726,1
340
+ 0.29817435,0.7018256,0
341
+ 0.998461,0.0015389919,1
342
+ 0.13103853,0.86896145,0
343
+ 0.9985176,0.0014824271,1
344
+ 0.018631425,0.9813686,0
345
+ 0.007553948,0.99244606,0
346
+ 0.9821675,0.017832518,1
347
+ 0.03639596,0.96360403,0
348
+ 0.99819213,0.0018078685,1
349
+ 0.07315975,0.92684025,0
350
+ 0.998536,0.0014640093,1
351
+ 0.06861384,0.9313862,0
352
+ 0.9842361,0.015763879,1
353
+ 0.9770156,0.022984385,1
354
+ 0.045052424,0.9549476,0
355
+ 0.02632695,0.97367305,0
356
+ 0.12479354,0.8752065,0
357
+ 0.027899565,0.97210044,0
358
+ 0.9970643,0.0029357076,1
359
+ 0.028322496,0.9716775,0
360
+ 0.015964283,0.98403573,0
361
+ 0.99454206,0.0054579377,1
362
+ 0.9567855,0.0432145,1
363
+ 0.1366626,0.8633374,0
364
+ 0.34570533,0.65429467,0
365
+ 0.98113364,0.01886636,1
366
+ 0.031976104,0.9680239,0
367
+ 0.9936114,0.0063886046,1
368
+ 0.074665144,0.9253349,0
369
+ 0.96817845,0.03182155,1
370
+ 0.027508667,0.9724913,0
371
+ 0.038272206,0.9617278,0
372
+ 0.1366477,0.8633523,0
373
+ 0.045209046,0.95479095,0
374
+ 0.9982004,0.0017995834,1
375
+ 0.99870825,0.0012917519,1
376
+ 0.13146307,0.86853695,0
377
+ 0.9978021,0.0021979213,1
378
+ 0.1191282,0.8808718,0
379
+ 0.14354594,0.8564541,0
380
+ 0.14098121,0.8590188,0
381
+ 0.07421217,0.9257878,0
382
+ 0.038740426,0.9612596,0
383
+ 0.99295145,0.0070485473,1
384
+ 0.01585439,0.9841456,0
385
+ 0.14390182,0.8560982,0
386
+ 0.8835642,0.116435826,1
387
+ 0.9970294,0.0029705763,1
388
+ 0.020482201,0.9795178,0
389
+ 0.99714226,0.0028577447,1
390
+ 0.00901483,0.99098516,0
391
+ 0.98934597,0.010654032,1
392
+ 0.023801634,0.9761984,0
393
+ 0.9186779,0.081322074,1
394
+ 0.90582275,0.094177246,1
395
+ 0.02475111,0.9752489,0
396
+ 0.3442358,0.6557642,0
397
+ 0.019960562,0.9800394,0
398
+ 0.030255651,0.9697443,0
399
+ 0.0067211078,0.9932789,0
400
+ 0.032122295,0.9678777,0
401
+ 0.17436148,0.82563853,0
402
+ 0.036086082,0.9639139,0
403
+ 0.9733636,0.026636422,1
404
+ 0.0072948597,0.99270517,0
405
+ 0.99385464,0.006145358,1
406
+ 0.050267994,0.949732,0
407
+ 0.99426794,0.0057320595,1
408
+ 0.008494619,0.9915054,0
409
+ 0.0058523207,0.99414766,0
410
+ 0.9979832,0.0020167828,1
411
+ 0.9989517,0.0010483265,1
412
+ 0.018339097,0.9816609,0
413
+ 0.008288293,0.99171174,0
414
+ 0.8102615,0.18973851,1
415
+ 0.38211793,0.6178821,0
416
+ 0.036204763,0.96379524,0
417
+ 0.02788097,0.97211903,0
418
+ 0.042824678,0.9571753,0
419
+ 0.99802667,0.001973331,1
420
+ 0.008822703,0.9911773,0
421
+ 0.9988279,0.0011721253,1
422
+ 0.71440965,0.28559035,1
423
+ 0.0091015315,0.9908985,0
424
+ 0.9986827,0.0013173223,1
425
+ 0.005577101,0.9944229,0
426
+ 0.0046732486,0.99532676,0
427
+ 0.8920117,0.1079883,1
428
+ 0.019544428,0.9804556,0
429
+ 0.017559746,0.98244023,0
430
+ 0.9991392,0.0008608103,1
431
+ 0.285806,0.714194,0
432
+ 0.004079517,0.9959205,0
433
+ 0.99448895,0.0055110455,1
434
+ 0.72328615,0.27671385,1
435
+ 0.992222,0.007777989,1
436
+ 0.84457546,0.15542454,1
437
+ 0.9900086,0.009991407,1
438
+ 0.023232585,0.9767674,0
439
+ 0.06461423,0.93538576,0
440
+ 0.9908214,0.009178579,1
441
+ 0.041911203,0.9580888,0
442
+ 0.005399338,0.99460065,0
443
+ 0.005777055,0.99422294,0
444
+ 0.008485552,0.99151444,0
445
+ 0.010486289,0.9895137,0
446
+ 0.9983606,0.0016394258,1
447
+ 0.99729997,0.0027000308,1
448
+ 0.04082743,0.95917255,0
449
+ 0.9795584,0.020441592,1
450
+ 0.18278207,0.81721795,0
451
+ 0.6752663,0.32473367,1
452
+ 0.025263365,0.97473663,0
453
+ 0.025001548,0.9749985,0
454
+ 0.008288305,0.9917117,0
455
+ 0.93799067,0.062009335,1
456
+ 0.9740321,0.025967896,1
457
+ 0.99840087,0.001599133,1
458
+ 0.013354494,0.9866455,0
459
+ 0.99022955,0.009770453,1
460
+ 0.9904971,0.009502888,1
461
+ 0.96959084,0.030409157,1
462
+ 0.023549955,0.97645,0
463
+ 0.99448305,0.0055169463,1
464
+ 0.94008815,0.059911847,1
465
+ 0.00910001,0.9909,0
466
+ 0.2211126,0.7788874,0
467
+ 0.071900345,0.92809963,0
468
+ 0.1399896,0.8600104,0
469
+ 0.89446133,0.105538666,1
470
+ 0.9986815,0.0013185143,1
471
+ 0.07689983,0.9231002,0
472
+ 0.03526106,0.96473897,0
473
+ 0.9944173,0.0055826902,1
474
+ 0.99555653,0.0044434667,1
475
+ 0.9300005,0.069999516,1
476
+ 0.99852353,0.0014764667,1
477
+ 0.04614967,0.9538503,0
478
+ 0.9988921,0.0011078715,1
479
+ 0.09094801,0.909052,0
480
+ 0.99436873,0.005631268,1
481
+ 0.9995783,0.00042170286,1
482
+ 0.99820864,0.001791358,1
483
+ 0.9936498,0.0063502192,1
484
+ 0.44385287,0.5561471,0
485
+ 0.015822127,0.9841779,0
486
+ 0.009705726,0.9902943,0
487
+ 0.99929214,0.00070786476,1
488
+ 0.91182107,0.08817893,1
489
+ 0.0532966,0.9467034,0
490
+ 0.19280082,0.8071992,0
491
+ 0.19682425,0.80317575,0
492
+ 0.99832064,0.0016793609,1
493
+ 0.98866165,0.011338353,1
494
+ 0.00702284,0.99297714,0
495
+ 0.9968426,0.0031573772,1
496
+ 0.97612256,0.023877442,1
497
+ 0.9994753,0.0005246997,1
498
+ 0.04822871,0.95177126,0
499
+ 0.04630028,0.9536997,0
500
+ 0.03636849,0.9636315,0
501
+ 0.06707926,0.93292075,0
502
+ 0.9209848,0.079015195,1
503
+ 0.041918114,0.9580819,0
504
+ 0.9982919,0.0017080903,1
505
+ 0.9916923,0.008307695,1
506
+ 0.99958557,0.0004144311,1
507
+ 0.93958086,0.060419142,1
508
+ 0.99822384,0.0017761588,1
509
+ 0.0048143035,0.9951857,0
510
+ 0.80963826,0.19036174,1
511
+ 0.9971149,0.0028851032,1
512
+ 0.9018868,0.09811318,1
513
+ 0.019182542,0.98081744,0
514
+ 0.9978204,0.0021796227,1
515
+ 0.013780018,0.98622,0
516
+ 0.061618652,0.9383814,0
517
+ 0.023082592,0.9769174,0
518
+ 0.3153884,0.68461156,0
519
+ 0.9961062,0.0038937926,1
520
+ 0.00821002,0.99179,0
521
+ 0.0039193057,0.9960807,0
522
+ 0.022387194,0.9776128,0
523
+ 0.0040321983,0.9959678,0
524
+ 0.013242932,0.98675704,0
525
+ 0.018313259,0.9816867,0
526
+ 0.45009077,0.54990923,0
527
+ 0.0863413,0.9136587,0
528
+ 0.76835245,0.23164755,1
529
+ 0.1501905,0.8498095,0
530
+ 0.08360305,0.916397,0
531
+ 0.062319502,0.9376805,0
532
+ 0.020205287,0.97979474,0
533
+ 0.9878077,0.012192309,1
534
+ 0.99364495,0.006355047,1
535
+ 0.97317296,0.026827037,1
536
+ 0.9520607,0.0479393,1
537
+ 0.014889939,0.98511004,0
538
+ 0.99859923,0.0014007688,1
539
+ 0.96271724,0.037282765,1
540
+ 0.16897695,0.83102304,0
541
+ 0.9984168,0.0015832186,1
542
+ 0.016794441,0.98320556,0
543
+ 0.99654347,0.003456533,1
544
+ 0.06750028,0.9324997,0
545
+ 0.9909288,0.009071171,1
546
+ 0.95343995,0.04656005,1
547
+ 0.017504636,0.98249537,0
548
+ 0.14900282,0.8509972,0
549
+ 0.85243565,0.14756435,1
550
+ 0.84768194,0.15231806,1
551
+ 0.9972241,0.0027759075,1
552
+ 0.084082656,0.91591734,0
553
+ 0.010540418,0.9894596,0
554
+ 0.016495451,0.98350453,0
555
+ 0.9985752,0.0014247894,1
556
+ 0.98048353,0.019516468,1
557
+ 0.19532166,0.8046783,0
558
+ 0.9886362,0.011363804,1
559
+ 0.08076632,0.9192337,0
560
+ 0.008728915,0.9912711,0
561
+ 0.0574543,0.9425457,0
562
+ 0.011110738,0.9888893,0
563
+ 0.99922097,0.0007790327,1
564
+ 0.98893434,0.011065662,1
565
+ 0.9970259,0.002974093,1
566
+ 0.022110134,0.9778899,0
567
+ 0.9886747,0.0113253,1
568
+ 0.88777745,0.11222255,1
569
+ 0.07979943,0.9202006,0
570
+ 0.99501956,0.004980445,1
571
+ 0.9837857,0.016214311,1
572
+ 0.99674195,0.0032580495,1
573
+ 0.9960226,0.003977418,1
574
+ 0.9243109,0.07568908,1
575
+ 0.022813339,0.9771867,0
576
+ 0.010475184,0.98952484,0
577
+ 0.24669257,0.75330746,0
578
+ 0.0079005575,0.99209946,0
579
+ 0.9943777,0.0056223273,1
580
+ 0.9646703,0.0353297,1
581
+ 0.5611204,0.4388796,1
582
+ 0.98852074,0.011479259,1
583
+ 0.99904543,0.0009545684,1
584
+ 0.99619746,0.003802538,1
585
+ 0.686266,0.313734,1
586
+ 0.9048934,0.0951066,1
587
+ 0.998626,0.0013740063,1
588
+ 0.020714786,0.97928524,0
589
+ 0.08723712,0.9127629,0
590
+ 0.010887853,0.98911214,0
591
+ 0.9981007,0.001899302,1
592
+ 0.008363384,0.99163663,0
593
+ 0.07330415,0.9266958,0
594
+ 0.042684928,0.9573151,0
595
+ 0.9953022,0.0046977997,1
596
+ 0.95522714,0.044772863,1
597
+ 0.004503014,0.995497,0
598
+ 0.99336654,0.0066334605,1
599
+ 0.011427498,0.9885725,0
600
+ 0.0059831645,0.9940168,0
601
+ 0.033026725,0.9669733,0
602
+ 0.95260864,0.047391355,1
603
+ 0.99024045,0.009759545,1
604
+ 0.9495226,0.050477386,1
605
+ 0.053587113,0.94641286,0
606
+ 0.0058875396,0.99411243,0
607
+ 0.012356952,0.98764306,0
608
+ 0.5658752,0.43412483,1
609
+ 0.24846739,0.7515326,0
610
+ 0.008855287,0.9911447,0
611
+ 0.7569278,0.24307221,1
612
+ 0.006064755,0.9939352,0
613
+ 0.04972837,0.9502716,0
614
+ 0.97489923,0.025100768,1
615
+ 0.0055999,0.9944001,0
616
+ 0.8805979,0.11940211,1
617
+ 0.01181866,0.98818135,0
618
+ 0.9937744,0.006225586,1
619
+ 0.28084522,0.7191548,0
620
+ 0.15967377,0.84032625,0
621
+ 0.9889797,0.011020303,1
622
+ 0.989017,0.01098299,1
623
+ 0.008059711,0.99194026,0
624
+ 0.71391255,0.28608745,1
625
+ 0.5856572,0.41434282,1
626
+ 0.06609964,0.93390036,0
627
+ 0.0070652305,0.99293476,0
628
+ 0.99846435,0.0015356541,1
629
+ 0.998755,0.0012450218,1
630
+ 0.18821171,0.8117883,0
631
+ 0.81269485,0.18730515,1
632
+ 0.3758352,0.6241648,0
633
+ 0.9993325,0.0006675124,1
634
+ 0.99910396,0.0008960366,1
635
+ 0.91652584,0.08347416,1
636
+ 0.9978934,0.002106607,1
637
+ 0.022769466,0.97723055,0
638
+ 0.010279603,0.9897204,0
639
+ 0.05468426,0.9453157,0
640
+ 0.7462674,0.25373262,1
641
+ 0.9755903,0.024409711,1
642
+ 0.9968106,0.003189385,1
643
+ 0.9993394,0.0006605983,1
644
+ 0.16288641,0.8371136,0
645
+ 0.15165526,0.84834474,0
646
+ 0.56493163,0.43506837,1
647
+ 0.014805926,0.9851941,0
648
+ 0.00980802,0.990192,0
649
+ 0.11050759,0.8894924,0
650
+ 0.9988618,0.0011382103,1
651
+ 0.098538876,0.9014611,0
652
+ 0.999263,0.00073701143,1
653
+ 0.9710623,0.028937697,1
654
+ 0.19719194,0.80280805,0
655
+ 0.9990664,0.00093358755,1
656
+ 0.9038046,0.0961954,1
657
+ 0.97925305,0.020746946,1
658
+ 0.015448706,0.9845513,0
659
+ 0.8359812,0.16401881,1
660
+ 0.9935063,0.0064936876,1
661
+ 0.041005027,0.958995,0
662
+ 0.006623385,0.9933766,0
663
+ 0.9715403,0.028459728,1
664
+ 0.00586235,0.99413764,0
665
+ 0.99843687,0.0015631318,1
666
+ 0.9931322,0.006867826,1
667
+ 0.95797384,0.042026162,1
668
+ 0.9730959,0.026904106,1
669
+ 0.9989213,0.0010787249,1
670
+ 0.020064415,0.9799356,0
671
+ 0.0082015665,0.99179846,0
672
+ 0.22102411,0.7789759,0
673
+ 0.050262906,0.9497371,0
674
+ 0.9907376,0.009262383,1
675
+ 0.02434753,0.97565246,0
676
+ 0.0040121526,0.99598783,0
677
+ 0.06719887,0.9328011,0
678
+ 0.09283851,0.9071615,0
679
+ 0.8579973,0.1420027,1
680
+ 0.003544694,0.9964553,0
681
+ 0.0127275605,0.98727244,0
682
+ 0.14148831,0.8585117,0
683
+ 0.17369907,0.8263009,0
684
+ 0.99048513,0.009514868,1
685
+ 0.006100175,0.9938998,0
686
+ 0.045033567,0.9549664,0
687
+ 0.02485333,0.97514665,0
688
+ 0.011303022,0.988697,0
689
+ 0.005073385,0.99492663,0
690
+ 0.9245911,0.075408876,1
691
+ 0.01278884,0.98721117,0
692
+ 0.25088003,0.74912,0
693
+ 0.019671641,0.9803284,0
694
+ 0.018753184,0.9812468,0
695
+ 0.9745849,0.025415123,1
696
+ 0.96467376,0.035326242,1
697
+ 0.997834,0.0021659732,1
698
+ 0.022014692,0.9779853,0
699
+ 0.9980742,0.0019258261,1
700
+ 0.9927483,0.00725168,1
701
+ 0.7059853,0.2940147,1
702
+ 0.08704138,0.9129586,0
703
+ 0.9972367,0.0027632713,1
704
+ 0.9983884,0.0016115904,1
705
+ 0.99655616,0.0034438372,1
706
+ 0.9986558,0.001344204,1
707
+ 0.99494886,0.005051136,1
708
+ 0.9940229,0.005977094,1
709
+ 0.96484864,0.035151362,1
710
+ 0.007455511,0.9925445,0
711
+ 0.005527592,0.9944724,0
712
+ 0.98621434,0.01378566,1
713
+ 0.871016,0.12898397,1
714
+ 0.89377874,0.10622126,1
715
+ 0.99446845,0.0055315495,1
716
+ 0.91739124,0.08260876,1
717
+ 0.6040018,0.39599818,1
718
+ 0.99962044,0.00037956238,1
719
+ 0.044754434,0.95524555,0
720
+ 0.029226534,0.97077346,0
721
+ 0.053961582,0.9460384,0
722
+ 0.04420892,0.95579106,0
723
+ 0.021653917,0.9783461,0
724
+ 0.97208977,0.027910233,1
725
+ 0.11175786,0.8882421,0
726
+ 0.35581326,0.64418674,0
727
+ 0.9964761,0.0035238862,1
728
+ 0.96563864,0.034361362,1
729
+ 0.8700507,0.12994927,1
730
+ 0.045485277,0.95451474,0
731
+ 0.059130877,0.9408691,0
732
+ 0.016029313,0.9839707,0
733
+ 0.015538482,0.98446155,0
734
+ 0.006073704,0.9939263,0
735
+ 0.9943797,0.005620301,1
736
+ 0.06907608,0.93092394,0
737
+ 0.9985827,0.0014172792,1
738
+ 0.98735875,0.012641251,1
739
+ 0.83207315,0.16792685,1
740
+ 0.9978781,0.0021219254,1
741
+ 0.99727625,0.0027237535,1
742
+ 0.1798166,0.8201834,0
743
+ 0.99847955,0.0015204549,1
744
+ 0.99838984,0.0016101599,1
745
+ 0.9221445,0.07785553,1
746
+ 0.3953893,0.6046107,0
747
+ 0.033834685,0.9661653,0
748
+ 0.93407387,0.065926135,1
749
+ 0.9978532,0.0021467805,1
750
+ 0.047393076,0.9526069,0
751
+ 0.009977417,0.9900226,0
752
+ 0.9984042,0.0015957952,1
753
+ 0.9225982,0.07740182,1
754
+ 0.29334685,0.7066531,0
755
+ 0.9311111,0.0688889,1
756
+ 0.0069155716,0.99308443,0
757
+ 0.18769734,0.81230265,0
758
+ 0.5133388,0.4866612,1
759
+ 0.99643123,0.0035687685,1
760
+ 0.31822467,0.68177533,0
761
+ 0.9993374,0.00066262484,1
762
+ 0.009679692,0.9903203,0
763
+ 0.013280961,0.986719,0
764
+ 0.9718593,0.028140724,1
765
+ 0.9918938,0.008106172,1
766
+ 0.14532466,0.85467535,0
767
+ 0.0037415025,0.9962585,0
768
+ 0.03407019,0.9659298,0
769
+ 0.97755814,0.022441864,1
770
+ 0.81584525,0.18415475,1
771
+ 0.741764,0.258236,1
772
+ 0.013957634,0.9860424,0
773
+ 0.9791868,0.020813227,1
774
+ 0.9016765,0.098323524,1
775
+ 0.9823056,0.017694414,1
776
+ 0.943373,0.056626976,1
777
+ 0.99617445,0.0038255453,1
778
+ 0.07151011,0.9284899,0
779
+ 0.019989952,0.98001003,0
780
+ 0.026443437,0.9735566,0
781
+ 0.8683212,0.13167882,1
782
+ 0.041806854,0.9581931,0
783
+ 0.04947704,0.95052296,0
784
+ 0.012653585,0.9873464,0
785
+ 0.6076077,0.39239228,1
786
+ 0.0809881,0.9190119,0
787
+ 0.99835867,0.0016413331,1
788
+ 0.9880654,0.011934578,1
789
+ 0.006595992,0.99340403,0
790
+ 0.99685466,0.003145337,1
791
+ 0.57808423,0.42191577,1
792
+ 0.99483997,0.0051600337,1
793
+ 0.32904592,0.6709541,0
794
+ 0.9855618,0.014438212,1
795
+ 0.009135274,0.99086475,0
796
+ 0.0036716368,0.99632835,0
797
+ 0.33460712,0.6653929,0
798
+ 0.08088086,0.9191191,0
799
+ 0.99136263,0.008637369,1
800
+ 0.004638182,0.9953618,0
801
+ 0.027611783,0.9723882,0
802
+ 0.06975093,0.9302491,0
803
+ 0.9995708,0.00042921305,1
804
+ 0.99814713,0.00185287,1
805
+ 0.010676901,0.9893231,0
806
+ 0.5979657,0.40203428,1
807
+ 0.005330069,0.9946699,0
808
+ 0.034967065,0.96503294,0
809
+ 0.9868292,0.013170779,1
810
+ 0.036505904,0.9634941,0
811
+ 0.44529447,0.5547055,0
812
+ 0.085055694,0.9149443,0
813
+ 0.40930474,0.59069526,0
814
+ 0.022625392,0.9773746,0
815
+ 0.9992893,0.0007107258,1
816
+ 0.9983209,0.0016791224,1
817
+ 0.9939918,0.006008208,1
818
+ 0.009977478,0.99002254,0
819
+ 0.03031458,0.96968544,0
820
+ 0.03933548,0.9606645,0
821
+ 0.9973109,0.0026891232,1
822
+ 0.012369861,0.9876301,0
823
+ 0.009919452,0.99008054,0
824
+ 0.003967394,0.9960326,0
825
+ 0.61004144,0.38995856,1
826
+ 0.092712425,0.9072876,0
827
+ 0.6661691,0.3338309,1
828
+ 0.024874799,0.9751252,0
829
+ 0.008128429,0.9918716,0
830
+ 0.9201727,0.07982731,1
831
+ 0.90013844,0.09986156,1
832
+ 0.016272707,0.9837273,0
833
+ 0.009259488,0.99074054,0
834
+ 0.045108136,0.95489186,0
835
+ 0.004623416,0.9953766,0
836
+ 0.095515065,0.9044849,0
837
+ 0.00910382,0.99089617,0
838
+ 0.9967338,0.0032662153,1
839
+ 0.009219348,0.99078065,0
840
+ 0.74009293,0.25990707,1
841
+ 0.029697519,0.97030246,0
842
+ 0.9995357,0.00046432018,1
843
+ 0.15477137,0.8452286,0
844
+ 0.9360491,0.063950896,1
845
+ 0.18420275,0.81579727,0
846
+ 0.0057439962,0.994256,0
847
+ 0.8495428,0.1504572,1
848
+ 0.065215774,0.93478423,0
849
+ 0.990941,0.009059012,1
850
+ 0.5047569,0.49524307,1
851
+ 0.099932,0.900068,0
852
+ 0.77030754,0.22969246,1
853
+ 0.1318299,0.8681701,0
854
+ 0.032800034,0.9672,0
855
+ 0.6238927,0.37610728,1
856
+ 0.007953466,0.99204654,0
857
+ 0.9985965,0.0014035106,1
858
+ 0.5803615,0.4196385,1
859
+ 0.007746156,0.99225384,0
860
+ 0.023724733,0.97627527,0
861
+ 0.0556386,0.9443614,0
862
+ 0.9970016,0.0029984117,1
863
+ 0.9261304,0.073869586,1
864
+ 0.01777667,0.98222333,0
865
+ 0.9532752,0.046724796,1
866
+ 0.8831005,0.11689949,1
867
+ 0.9995572,0.0004428029,1
868
+ 0.8721796,0.12782037,1
869
+ 0.5037541,0.49624592,1
870
+ 0.0069598034,0.9930402,0
871
+ 0.08025726,0.91974276,0
872
+ 0.25673786,0.7432622,0
873
+ 0.12441478,0.8755852,0
874
+ 0.9992532,0.0007467866,1
875
+ 0.999086,0.0009139776,1
876
+ 0.99950063,0.0004993677,1
877
+ 0.9957129,0.0042871237,1
878
+ 0.9969747,0.0030252934,1
879
+ 0.9968554,0.0031446218,1
880
+ 0.0067989957,0.993201,0
881
+ 0.9993717,0.00062829256,1
882
+ 0.008507871,0.99149215,0
883
+ 0.028463159,0.9715368,0
884
+ 0.013464234,0.9865358,0
885
+ 0.98946357,0.010536432,1
886
+ 0.8603748,0.13962519,1
887
+ 0.023518743,0.97648126,0
888
+ 0.90848714,0.09151286,1
889
+ 0.9970233,0.0029767156,1
890
+ 0.9983057,0.0016943216,1
891
+ 0.9855457,0.014454305,1
892
+ 0.025178231,0.97482175,0
893
+ 0.38972977,0.61027026,0
894
+ 0.006671187,0.9933288,0
895
+ 0.8236027,0.17639732,1
896
+ 0.9991393,0.00086069107,1
897
+ 0.99924743,0.00075256824,1
898
+ 0.87936443,0.12063557,1
899
+ 0.9963427,0.0036572814,1
900
+ 0.9990728,0.00092720985,1
901
+ 0.9866289,0.01337111,1
902
+ 0.009135871,0.99086416,0
903
+ 0.37113473,0.62886524,0
904
+ 0.8255929,0.17440712,1
905
+ 0.84017515,0.15982485,1
906
+ 0.24333924,0.75666076,0
907
+ 0.01767512,0.9823249,0
908
+ 0.3193511,0.6806489,0
909
+ 0.32349592,0.6765041,0
910
+ 0.009757376,0.9902426,0
911
+ 0.059711967,0.940288,0
912
+ 0.048434716,0.95156527,0
913
+ 0.9971687,0.0028312802,1
914
+ 0.006627148,0.99337286,0
915
+ 0.21780026,0.78219974,0
916
+ 0.763375,0.23662502,1
917
+ 0.9526471,0.04735291,1
918
+ 0.9456123,0.05438769,1
919
+ 0.9966397,0.0033602715,1
920
+ 0.97273964,0.027260363,1
921
+ 0.99304914,0.0069508553,1
922
+ 0.11976255,0.88023746,0
923
+ 0.011550046,0.98844993,0
924
+ 0.7728524,0.22714758,1
925
+ 0.088624254,0.91137576,0
926
+ 0.0072288644,0.99277115,0
927
+ 0.16715257,0.8328474,0
928
+ 0.05877057,0.9412294,0
929
+ 0.57725894,0.42274106,1
930
+ 0.7936089,0.2063911,1
931
+ 0.6493381,0.35066187,1
932
+ 0.020306258,0.9796938,0
933
+ 0.009961284,0.9900387,0
934
+ 0.19224018,0.8077598,0
935
+ 0.7799489,0.22005111,1
936
+ 0.4005932,0.59940684,0
937
+ 0.006853562,0.9931464,0
938
+ 0.010784755,0.98921525,0
939
+ 0.9719069,0.0280931,1
940
+ 0.9991703,0.00082969666,1
941
+ 0.007835059,0.99216497,0
942
+ 0.02409257,0.97590744,0
943
+ 0.009471969,0.99052805,0
944
+ 0.8849896,0.11501038,1
945
+ 0.00860207,0.9913979,0
946
+ 0.8376789,0.16232109,1
947
+ 0.030283406,0.9697166,0
948
+ 0.050445966,0.949554,0
949
+ 0.031760346,0.96823967,0
950
+ 0.96670693,0.03329307,1
951
+ 0.9897713,0.0102286935,1
952
+ 0.48868972,0.5113103,0
953
+ 0.0074922163,0.99250776,0
954
+ 0.044191115,0.9558089,0
955
+ 0.9986461,0.0013539195,1
956
+ 0.99760157,0.0023984313,1
957
+ 0.08052328,0.91947675,0
958
+ 0.086333334,0.91366667,0
959
+ 0.014533688,0.9854663,0
960
+ 0.06986273,0.9301373,0
961
+ 0.9155712,0.08442879,1
962
+ 0.018774696,0.9812253,0
963
+ 0.0048700487,0.99512994,0
964
+ 0.020125298,0.97987473,0
965
+ 0.039828893,0.9601711,0
966
+ 0.032481864,0.96751815,0
967
+ 0.008434963,0.99156505,0
968
+ 0.99925596,0.0007440448,1
969
+ 0.99818003,0.0018199682,1
970
+ 0.98027897,0.019721031,1
971
+ 0.96640164,0.033598363,1
972
+ 0.99490404,0.0050959587,1
973
+ 0.9279291,0.0720709,1
974
+ 0.007599113,0.9924009,0
975
+ 0.011121908,0.9888781,0
976
+ 0.64468837,0.35531163,1
977
+ 0.990404,0.00959599,1
978
+ 0.9665553,0.033444703,1
979
+ 0.0107031865,0.9892968,0
980
+ 0.019392272,0.98060775,0
981
+ 0.0033303546,0.99666965,0
982
+ 0.009797643,0.99020237,0
983
+ 0.010099522,0.98990047,0
984
+ 0.99910945,0.000890553,1
985
+ 0.9873333,0.012666702,1
986
+ 0.013250164,0.9867498,0
987
+ 0.9914556,0.008544385,1
988
+ 0.99537116,0.004628837,1
989
+ 0.0383242,0.9616758,0
990
+ 0.20953487,0.7904651,0
991
+ 0.9945886,0.0054113865,1
992
+ 0.016832236,0.98316777,0
993
+ 0.48680806,0.51319194,0
994
+ 0.86839044,0.13160956,1
995
+ 0.044758134,0.95524186,0
996
+ 0.13485679,0.8651432,0
997
+ 0.0157662,0.9842338,0
998
+ 0.451411,0.548589,0
999
+ 0.11363065,0.88636935,0
1000
+ 0.023263332,0.97673666,0
1001
+ 0.7134627,0.2865373,1
1002
+ 0.0037919132,0.9962081,0
1003
+ 0.013050195,0.9869498,0
1004
+ 0.8444543,0.15554571,1
1005
+ 0.9903319,0.009668112,1
1006
+ 0.99921525,0.00078475475,1
1007
+ 0.99898714,0.0010128617,1
1008
+ 0.13620225,0.8637978,0
1009
+ 0.013536919,0.98646307,0
1010
+ 0.99317753,0.006822467,1
1011
+ 0.028010461,0.9719895,0
1012
+ 0.9976146,0.002385378,1
1013
+ 0.004399622,0.9956004,0
1014
+ 0.99833626,0.0016637444,1
1015
+ 0.08251567,0.91748434,0
1016
+ 0.10332264,0.8966774,0
1017
+ 0.040158797,0.9598412,0
1018
+ 0.97927505,0.020724952,1
1019
+ 0.9992899,0.00071012974,1
1020
+ 0.9805861,0.019413888,1
1021
+ 0.99103546,0.008964539,1
1022
+ 0.9977569,0.0022431016,1
1023
+ 0.9481278,0.051872194,1
1024
+ 0.98365295,0.01634705,1
1025
+ 0.00813519,0.9918648,0
1026
+ 0.9970612,0.002938807,1
1027
+ 0.26596302,0.734037,0
1028
+ 0.009799059,0.99020094,0
1029
+ 0.018850708,0.9811493,0
1030
+ 0.0105197355,0.98948026,0
1031
+ 0.010761922,0.9892381,0
1032
+ 0.0024888667,0.99751115,0
1033
+ 0.9988703,0.0011296868,1
1034
+ 0.037258502,0.9627415,0
1035
+ 0.9983498,0.0016502142,1
1036
+ 0.9954424,0.0045576096,1
1037
+ 0.27402484,0.72597516,0
1038
+ 0.98955137,0.010448635,1
1039
+ 0.9904586,0.009541392,1
1040
+ 0.009958584,0.99004143,0
1041
+ 0.97925276,0.020747244,1
1042
+ 0.013176877,0.98682314,0
1043
+ 0.9811686,0.018831372,1
1044
+ 0.7930621,0.20693791,1
1045
+ 0.98382646,0.016173542,1
1046
+ 0.015370493,0.9846295,0
1047
+ 0.9974444,0.0025556087,1
1048
+ 0.017223349,0.98277664,0
1049
+ 0.9930761,0.006923914,1
1050
+ 0.98881847,0.011181533,1
1051
+ 0.020088136,0.97991186,0
1052
+ 0.98676527,0.013234735,1
1053
+ 0.7441848,0.2558152,1
1054
+ 0.021257112,0.9787429,0
1055
+ 0.049500823,0.9504992,0
1056
+ 0.9956418,0.0043581724,1
1057
+ 0.98011106,0.019888937,1
1058
+ 0.04486373,0.9551363,0
1059
+ 0.010076289,0.9899237,0
1060
+ 0.042884048,0.95711595,0
1061
+ 0.004081422,0.9959186,0
1062
+ 0.92431647,0.075683534,1
1063
+ 0.0061153546,0.9938846,0
1064
+ 0.03065702,0.969343,0
1065
+ 0.99942625,0.0005737543,1
1066
+ 0.9969342,0.0030658245,1
1067
+ 0.33664927,0.6633507,0
1068
+ 0.8323451,0.16765487,1
1069
+ 0.48339933,0.51660067,0
1070
+ 0.8023578,0.1976422,1
1071
+ 0.99521106,0.004788935,1
1072
+ 0.008354017,0.991646,0
1073
+ 0.0083338395,0.99166614,0
1074
+ 0.9990434,0.00095659494,1
1075
+ 0.027421737,0.9725783,0
1076
+ 0.6689694,0.3310306,1
1077
+ 0.9975788,0.0024212003,1
1078
+ 0.008043389,0.9919566,0
1079
+ 0.9897676,0.010232389,1
1080
+ 0.97369415,0.026305854,1
1081
+ 0.999448,0.0005519986,1
1082
+ 0.0062954775,0.9937045,0
1083
+ 0.018832054,0.981168,0
1084
+ 0.02576671,0.97423327,0
1085
+ 0.97875744,0.021242559,1
1086
+ 0.9788224,0.02117759,1
1087
+ 0.9962846,0.003715396,1
1088
+ 0.99609756,0.0039024353,1
1089
+ 0.98931915,0.010680854,1
1090
+ 0.9994235,0.0005764961,1
1091
+ 0.0328933,0.9671067,0
1092
+ 0.0029920537,0.99700797,0
1093
+ 0.044153806,0.9558462,0
1094
+ 0.007982964,0.99201703,0
1095
+ 0.99761534,0.0023846626,1
1096
+ 0.21471351,0.7852865,0
1097
+ 0.05046377,0.9495362,0
1098
+ 0.012508022,0.98749197,0
1099
+ 0.13305728,0.8669427,0
1100
+ 0.7859841,0.2140159,1
1101
+ 0.19470027,0.80529976,0
1102
+ 0.017502619,0.9824974,0
1103
+ 0.005371453,0.99462855,0
1104
+ 0.9415917,0.05840832,1
1105
+ 0.38696468,0.6130353,0
1106
+ 0.027144982,0.97285503,0
1107
+ 0.12719089,0.8728091,0
1108
+ 0.99023587,0.009764135,1
1109
+ 0.048203036,0.95179695,0
1110
+ 0.9876102,0.012389779,1
1111
+ 0.0053080847,0.9946919,0
1112
+ 0.06958628,0.9304137,0
1113
+ 0.33484548,0.6651545,0
1114
+ 0.9761646,0.02383542,1
1115
+ 0.956077,0.04392302,1
1116
+ 0.004388816,0.9956112,0
1117
+ 0.05100796,0.948992,0
1118
+ 0.066765234,0.93323475,0
1119
+ 0.040381666,0.95961833,0
1120
+ 0.41675487,0.58324516,0
1121
+ 0.014713737,0.98528624,0
1122
+ 0.99280775,0.007192254,1
1123
+ 0.011845043,0.98815495,0
1124
+ 0.99743444,0.0025655627,1
1125
+ 0.16600418,0.8339958,0
1126
+ 0.9987488,0.0012512207,1
1127
+ 0.99649113,0.0035088658,1
1128
+ 0.10554891,0.8944511,0
1129
+ 0.006466265,0.99353373,0
1130
+ 0.9909072,0.009092808,1
1131
+ 0.005523557,0.99447644,0
1132
+ 0.75607914,0.24392086,1
1133
+ 0.14364703,0.856353,0
1134
+ 0.011028931,0.98897105,0
1135
+ 0.6524593,0.34754068,1
1136
+ 0.025872411,0.9741276,0
1137
+ 0.00706426,0.9929357,0
1138
+ 0.07479455,0.92520547,0
1139
+ 0.1657074,0.8342926,0
1140
+ 0.005833655,0.9941664,0
1141
+ 0.005355295,0.9946447,0
1142
+ 0.2920527,0.7079473,0
1143
+ 0.016586432,0.9834136,0
1144
+ 0.016409565,0.9835904,0
1145
+ 0.0076001384,0.9923999,0
1146
+ 0.0063760076,0.993624,0
1147
+ 0.022196086,0.9778039,0
1148
+ 0.38600442,0.61399555,0
1149
+ 0.99939144,0.0006085634,1
1150
+ 0.0034464216,0.9965536,0
1151
+ 0.361216,0.638784,0
1152
+ 0.99737984,0.0026201606,1
1153
+ 0.9988889,0.0011110902,1
1154
+ 0.017089987,0.98291004,0
1155
+ 0.12927955,0.87072045,0
1156
+ 0.0119556505,0.9880443,0
1157
+ 0.010302323,0.9896977,0
1158
+ 0.020978624,0.9790214,0
1159
+ 0.005982434,0.99401754,0
1160
+ 0.8412838,0.1587162,1
1161
+ 0.9988533,0.0011466742,1
1162
+ 0.9669735,0.033026516,1
1163
+ 0.03497836,0.9650216,0
1164
+ 0.98466706,0.015332937,1
1165
+ 0.97631705,0.023682952,1
1166
+ 0.97856927,0.02143073,1
1167
+ 0.0048057255,0.99519426,0
1168
+ 0.06342308,0.9365769,0
1169
+ 0.99826235,0.0017376542,1
1170
+ 0.15364024,0.8463597,0
1171
+ 0.40021303,0.599787,0
1172
+ 0.0041179643,0.99588203,0
1173
+ 0.075168215,0.9248318,0
1174
+ 0.9788011,0.021198928,1
1175
+ 0.99336797,0.00663203,1
1176
+ 0.010127983,0.98987204,0
1177
+ 0.024760079,0.97523993,0
1178
+ 0.039081942,0.96091807,0
1179
+ 0.050570976,0.94942904,0
1180
+ 0.0043589063,0.9956411,0
1181
+ 0.05382902,0.946171,0
1182
+ 0.99868125,0.0013187528,1
1183
+ 0.022125728,0.9778743,0
1184
+ 0.027055407,0.9729446,0
1185
+ 0.010466004,0.989534,0
1186
+ 0.8875537,0.11244631,1
1187
+ 0.7485318,0.25146818,1
1188
+ 0.020889668,0.97911036,0
1189
+ 0.91352326,0.08647674,1
1190
+ 0.9941732,0.0058267713,1
1191
+ 0.9896074,0.010392606,1
1192
+ 0.29845682,0.7015432,0
1193
+ 0.9976998,0.0023002028,1
1194
+ 0.9324289,0.0675711,1
1195
+ 0.95450217,0.045497835,1
1196
+ 0.020260785,0.9797392,0
1197
+ 0.07450577,0.92549425,0
1198
+ 0.016774233,0.98322576,0
1199
+ 0.9910937,0.008906305,1
1200
+ 0.9993247,0.0006753206,1
1201
+ 0.9784963,0.021503687,1
1202
+ 0.01813573,0.9818643,0
1203
+ 0.024274234,0.97572577,0
1204
+ 0.7654162,0.2345838,1
1205
+ 0.054364182,0.9456358,0
1206
+ 0.00689358,0.9931064,0
1207
+ 0.9892747,0.010725319,1
1208
+ 0.035685148,0.9643149,0
1209
+ 0.026724782,0.97327524,0
1210
+ 0.0061561246,0.99384385,0
1211
+ 0.016497921,0.9835021,0
1212
+ 0.2126436,0.7873564,0
1213
+ 0.028352933,0.9716471,0
1214
+ 0.9969298,0.0030701756,1
1215
+ 0.06494459,0.93505543,0
1216
+ 0.022030085,0.9779699,0
1217
+ 0.019680664,0.9803193,0
1218
+ 0.7809173,0.21908271,1
1219
+ 0.010819897,0.9891801,0
1220
+ 0.9282383,0.07176173,1
1221
+ 0.11294328,0.8870567,0
1222
+ 0.035495106,0.9645049,0
1223
+ 0.98323613,0.016763866,1
1224
+ 0.9990907,0.00090932846,1
1225
+ 0.9861849,0.013815105,1
1226
+ 0.95013136,0.049868643,1
1227
+ 0.9784289,0.0215711,1
1228
+ 0.99936444,0.0006355643,1
1229
+ 0.14492655,0.85507345,0
1230
+ 0.02089554,0.97910446,0
1231
+ 0.5666853,0.43331468,1
1232
+ 0.8388569,0.16114312,1
1233
+ 0.99948466,0.00051534176,1
1234
+ 0.32266107,0.67733896,0
1235
+ 0.22613199,0.773868,0
1236
+ 0.9976216,0.0023784041,1
1237
+ 0.017863708,0.9821363,0
1238
+ 0.99812025,0.0018797517,1
1239
+ 0.9253824,0.074617624,1
1240
+ 0.11449779,0.8855022,0
1241
+ 0.79161954,0.20838046,1
1242
+ 0.6034196,0.3965804,1
1243
+ 0.994422,0.0055779815,1
1244
+ 0.987356,0.012643993,1
1245
+ 0.102927394,0.8970726,0
1246
+ 0.99026746,0.009732544,1
1247
+ 0.060831368,0.93916863,0
1248
+ 0.010069541,0.98993045,0
1249
+ 0.06040917,0.9395908,0
1250
+ 0.027976764,0.97202325,0
1251
+ 0.99090844,0.009091556,1
1252
+ 0.90981907,0.09018093,1
1253
+ 0.007927,0.992073,0
1254
+ 0.06442671,0.9355733,0
1255
+ 0.147704,0.852296,0
1256
+ 0.8378683,0.16213173,1
1257
+ 0.3930114,0.6069886,0
1258
+ 0.018256415,0.9817436,0
1259
+ 0.11725734,0.88274264,0
1260
+ 0.021809231,0.9781908,0
1261
+ 0.08261011,0.91738987,0
1262
+ 0.97728467,0.02271533,1
1263
+ 0.17750403,0.82249594,0
1264
+ 0.13400328,0.8659967,0
1265
+ 0.9968172,0.0031828284,1
1266
+ 0.98541033,0.014589667,1
1267
+ 0.009407424,0.9905926,0
1268
+ 0.008011963,0.99198806,0
1269
+ 0.012682398,0.9873176,0
1270
+ 0.9922754,0.007724583,1
1271
+ 0.981888,0.018112004,1
1272
+ 0.99922466,0.0007753372,1
1273
+ 0.6282604,0.37173963,1
1274
+ 0.9976405,0.0023595095,1
1275
+ 0.06869715,0.93130285,0
1276
+ 0.9762063,0.023793697,1
1277
+ 0.016776472,0.98322356,0
1278
+ 0.9418864,0.058113575,1
1279
+ 0.14973447,0.8502655,0
1280
+ 0.031702943,0.96829706,0
1281
+ 0.9256004,0.07439959,1
1282
+ 0.2665189,0.7334811,0
1283
+ 0.019856807,0.9801432,0
1284
+ 0.89433575,0.10566425,1
1285
+ 0.76636726,0.23363274,1
1286
+ 0.8587461,0.14125389,1
1287
+ 0.99874324,0.0012567639,1
1288
+ 0.99191463,0.00808537,1
1289
+ 0.08815202,0.91184795,0
1290
+ 0.081320964,0.91867906,0
1291
+ 0.54173625,0.45826375,1
1292
+ 0.008328182,0.9916718,0
1293
+ 0.07964335,0.92035663,0
1294
+ 0.059369482,0.9406305,0
1295
+ 0.014795463,0.9852045,0
1296
+ 0.05203814,0.94796187,0
1297
+ 0.73595935,0.26404065,1
1298
+ 0.01779737,0.98220265,0
1299
+ 0.9566205,0.043379486,1
1300
+ 0.9421916,0.0578084,1
1301
+ 0.22871657,0.77128345,0
1302
+ 0.99752265,0.0024773479,1
1303
+ 0.7581353,0.24186468,1
1304
+ 0.8499992,0.15000081,1
1305
+ 0.038413547,0.9615865,0
1306
+ 0.08642905,0.91357094,0
1307
+ 0.045731783,0.9542682,0
1308
+ 0.0058042263,0.99419576,0
1309
+ 0.77016866,0.22983134,1
1310
+ 0.02571982,0.9742802,0
1311
+ 0.7330806,0.26691937,1
1312
+ 0.013069112,0.9869309,0
1313
+ 0.08873848,0.9112615,0
1314
+ 0.94620895,0.053791046,1
1315
+ 0.5662563,0.43374372,1
1316
+ 0.99929786,0.0007021427,1
1317
+ 0.16649425,0.83350575,0
1318
+ 0.99830794,0.0016920567,1
1319
+ 0.9986922,0.0013077855,1
1320
+ 0.9215894,0.078410625,1
1321
+ 0.031192193,0.9688078,0
1322
+ 0.996232,0.0037680268,1
1323
+ 0.007467094,0.9925329,0
1324
+ 0.022584517,0.9774155,0
1325
+ 0.999602,0.0003979802,1
1326
+ 0.16674419,0.8332558,0
1327
+ 0.009180919,0.9908191,0
1328
+ 0.053258955,0.94674104,0
1329
+ 0.055108435,0.9448916,0
1330
+ 0.0040962533,0.99590373,0
1331
+ 0.0057646777,0.99423534,0
1332
+ 0.6833348,0.31666517,1
1333
+ 0.0064416965,0.9935583,0
1334
+ 0.99925417,0.0007458329,1
1335
+ 0.9962142,0.003785789,1
1336
+ 0.45586553,0.5441345,0
1337
+ 0.9910624,0.008937597,1
1338
+ 0.021676749,0.9783232,0
1339
+ 0.9927651,0.0072348714,1
1340
+ 0.0062886146,0.9937114,0
1341
+ 0.02173954,0.97826046,0
1342
+ 0.9910812,0.008918822,1
1343
+ 0.017022233,0.98297775,0
1344
+ 0.9968066,0.0031933784,1
1345
+ 0.9444267,0.055573285,1
1346
+ 0.9955771,0.004422903,1
1347
+ 0.025876896,0.9741231,0
1348
+ 0.84468514,0.15531486,1
1349
+ 0.98764104,0.0123589635,1
1350
+ 0.041982997,0.958017,0
1351
+ 0.9668701,0.03312987,1
1352
+ 0.9927254,0.0072746277,1
1353
+ 0.81021255,0.18978745,1
1354
+ 0.0039480305,0.99605197,0
1355
+ 0.9966804,0.003319621,1
1356
+ 0.02658584,0.9734142,0
1357
+ 0.008913195,0.9910868,0
1358
+ 0.48995256,0.51004744,0
1359
+ 0.01619497,0.98380506,0
1360
+ 0.8158856,0.1841144,1
1361
+ 0.015672062,0.9843279,0
1362
+ 0.23786175,0.76213825,0
1363
+ 0.9344621,0.06553793,1
1364
+ 0.3903679,0.60963213,0
1365
+ 0.98095345,0.019046545,1
1366
+ 0.99662787,0.0033721328,1
1367
+ 0.99536383,0.0046361685,1
1368
+ 0.99891305,0.0010869503,1
1369
+ 0.9992229,0.00077712536,1
1370
+ 0.9984623,0.0015376806,1
1371
+ 0.98494184,0.01505816,1
1372
+ 0.6666944,0.3333056,1
1373
+ 0.030357603,0.9696424,0
1374
+ 0.037724018,0.962276,0
1375
+ 0.98852074,0.011479259,1
1376
+ 0.9913742,0.008625805,1
1377
+ 0.08803509,0.9119649,0
1378
+ 0.98608357,0.013916433,1
1379
+ 0.15481658,0.84518343,0
1380
+ 0.9986959,0.00130409,1
1381
+ 0.039063603,0.9609364,0
1382
+ 0.981058,0.018941998,1
1383
+ 0.95552135,0.044478655,1
1384
+ 0.99657154,0.0034284592,1
1385
+ 0.96582574,0.034174263,1
1386
+ 0.12840837,0.8715916,0
1387
+ 0.06750326,0.9324967,0
1388
+ 0.008044997,0.991955,0
1389
+ 0.92346525,0.07653475,1
1390
+ 0.0076527144,0.9923473,0
1391
+ 0.9366683,0.06333172,1
1392
+ 0.993299,0.0067009926,1
1393
+ 0.8213141,0.1786859,1
1394
+ 0.017613374,0.98238665,0
1395
+ 0.9820873,0.017912686,1
1396
+ 0.99616903,0.0038309693,1
1397
+ 0.005217338,0.9947827,0
1398
+ 0.14317794,0.8568221,0
1399
+ 0.98979735,0.010202646,1
1400
+ 0.98276997,0.017230034,1
1401
+ 0.02363786,0.97636217,0
1402
+ 0.9993363,0.0006636977,1
1403
+ 0.0060686166,0.9939314,0
1404
+ 0.0069341217,0.9930659,0
1405
+ 0.12500702,0.87499297,0
1406
+ 0.9976876,0.0023124218,1
1407
+ 0.032320447,0.96767956,0
1408
+ 0.9932267,0.006773293,1
1409
+ 0.9993524,0.00064760447,1
1410
+ 0.017723538,0.98227644,0
1411
+ 0.99930847,0.0006915331,1
1412
+ 0.026786294,0.97321373,0
1413
+ 0.9953811,0.004618883,1
1414
+ 0.05602691,0.94397306,0
1415
+ 0.93900746,0.06099254,1
1416
+ 0.06609331,0.9339067,0
1417
+ 0.9992368,0.0007631779,1
1418
+ 0.0047274693,0.9952725,0
1419
+ 0.0035287414,0.9964713,0
1420
+ 0.010133721,0.98986626,0
1421
+ 0.99950624,0.0004937649,1
1422
+ 0.99518245,0.0048175454,1
1423
+ 0.85900867,0.14099133,1
1424
+ 0.013674246,0.98632574,0
1425
+ 0.4552685,0.5447315,0
1426
+ 0.6273271,0.37267292,1
1427
+ 0.634135,0.365865,1
1428
+ 0.025024055,0.97497594,0
1429
+ 0.9986765,0.0013235211,1
1430
+ 0.9925915,0.0074084997,1
1431
+ 0.031512488,0.9684875,0
1432
+ 0.031667393,0.9683326,0
1433
+ 0.98877084,0.011229157,1
1434
+ 0.11138903,0.88861096,0
1435
+ 0.018551039,0.98144895,0
1436
+ 0.1099385,0.8900615,0
1437
+ 0.97109264,0.028907359,1
1438
+ 0.99762803,0.0023719668,1
1439
+ 0.033481557,0.96651846,0
1440
+ 0.3520394,0.6479606,0
1441
+ 0.9906474,0.009352624,1
1442
+ 0.991323,0.008677006,1
1443
+ 0.9975407,0.0024592876,1
1444
+ 0.1808514,0.8191486,0
1445
+ 0.98764414,0.012355864,1
1446
+ 0.3070029,0.6929971,0
1447
+ 0.74905807,0.25094193,1
1448
+ 0.9585725,0.041427493,1
1449
+ 0.13658333,0.8634167,0
1450
+ 0.99799156,0.002008438,1
1451
+ 0.005342166,0.9946578,0
1452
+ 0.2853669,0.7146331,0
1453
+ 0.045085136,0.95491487,0
1454
+ 0.17808905,0.821911,0
1455
+ 0.9969331,0.0030668974,1
1456
+ 0.9803248,0.019675195,1
1457
+ 0.013801489,0.9861985,0
1458
+ 0.99591994,0.004080057,1
1459
+ 0.99159765,0.008402348,1
1460
+ 0.92114025,0.07885975,1
1461
+ 0.009800484,0.9901995,0
1462
+ 0.9970572,0.0029428005,1
1463
+ 0.966618,0.033382,1
1464
+ 0.012980941,0.98701906,0
1465
+ 0.020350644,0.97964936,0
1466
+ 0.99604213,0.0039578676,1
1467
+ 0.0130906375,0.9869094,0
1468
+ 0.1727994,0.8272006,0
1469
+ 0.3974163,0.6025837,0
1470
+ 0.008056974,0.991943,0
1471
+ 0.99847955,0.0015204549,1
1472
+ 0.03023014,0.96976984,0
1473
+ 0.99732983,0.0026701689,1
1474
+ 0.011600603,0.9883994,0
1475
+ 0.017608877,0.9823911,0
1476
+ 0.0065057212,0.9934943,0
1477
+ 0.9989127,0.0010873079,1
1478
+ 0.012923739,0.9870763,0
1479
+ 0.99912506,0.0008749366,1
1480
+ 0.711822,0.28817803,1
1481
+ 0.23732215,0.76267785,0
1482
+ 0.01752919,0.9824708,0
1483
+ 0.89879215,0.10120785,1
1484
+ 0.9992508,0.0007491708,1
1485
+ 0.9985765,0.0014234781,1
1486
+ 0.099058025,0.90094197,0
1487
+ 0.65267843,0.34732157,1
1488
+ 0.011939011,0.988061,0
1489
+ 0.9963329,0.0036671162,1
1490
+ 0.032201234,0.96779877,0
1491
+ 0.73343045,0.26656955,1
1492
+ 0.99959856,0.00040143728,1
1493
+ 0.018501587,0.9814984,0
1494
+ 0.92960215,0.070397854,1
1495
+ 0.005352156,0.99464786,0
1496
+ 0.05473804,0.94526196,0
1497
+ 0.8172234,0.18277657,1
1498
+ 0.06750265,0.9324974,0
1499
+ 0.97676474,0.023235261,1
1500
+ 0.9986656,0.0013344288,1
1501
+ 0.9985039,0.0014960766,1
1502
+ 0.005292988,0.994707,0
1503
+ 0.07375611,0.9262439,0
1504
+ 0.9002514,0.09974861,1
1505
+ 0.9892237,0.010776281,1
1506
+ 0.022156762,0.9778432,0
1507
+ 0.010607737,0.9893923,0
1508
+ 0.008308782,0.99169123,0
1509
+ 0.0063182046,0.9936818,0
1510
+ 0.9971814,0.0028185844,1
1511
+ 0.99827003,0.0017299652,1
1512
+ 0.98925215,0.01074785,1
1513
+ 0.0118042,0.9881958,0
1514
+ 0.070666924,0.9293331,0
1515
+ 0.92634267,0.073657334,1
1516
+ 0.99801993,0.0019800663,1
1517
+ 0.005681843,0.9943181,0
1518
+ 0.99799275,0.002007246,1
1519
+ 0.96417665,0.035823345,1
1520
+ 0.007903477,0.99209654,0
1521
+ 0.9944728,0.0055271983,1
1522
+ 0.01692005,0.98307997,0
1523
+ 0.9976041,0.002395928,1
1524
+ 0.030179065,0.9698209,0
1525
+ 0.035560325,0.9644397,0
1526
+ 0.9977952,0.0022047758,1
1527
+ 0.98884225,0.011157751,1
1528
+ 0.027943589,0.9720564,0
1529
+ 0.09933351,0.9006665,0
1530
+ 0.005255597,0.9947444,0
1531
+ 0.9890809,0.010919094,1
1532
+ 0.008858133,0.99114186,0
1533
+ 0.971458,0.028541982,1
1534
+ 0.9954934,0.004506588,1
1535
+ 0.14727719,0.8527228,0
1536
+ 0.995262,0.004737973,1
1537
+ 0.0547841,0.9452159,0
1538
+ 0.9983998,0.0016002059,1
1539
+ 0.970763,0.029236972,1
1540
+ 0.6435678,0.3564322,1
1541
+ 0.99504083,0.004959166,1
1542
+ 0.0041003833,0.9958996,0
1543
+ 0.90253276,0.09746724,1
1544
+ 0.89801,0.101989985,1
1545
+ 0.09505517,0.90494484,0
1546
+ 0.020008063,0.9799919,0
1547
+ 0.010442632,0.9895574,0
1548
+ 0.83515763,0.16484237,1
1549
+ 0.053632632,0.9463674,0
1550
+ 0.010802641,0.9891974,0
1551
+ 0.029274115,0.9707259,0
1552
+ 0.057504263,0.94249576,0
1553
+ 0.04912152,0.9508785,0
1554
+ 0.9992136,0.0007864237,1
1555
+ 0.9520346,0.047965407,1
1556
+ 0.9992085,0.0007914901,1
1557
+ 0.0058381474,0.99416184,0
1558
+ 0.075708784,0.9242912,0
1559
+ 0.21511449,0.7848855,0
1560
+ 0.032300383,0.96769965,0
1561
+ 0.17907566,0.82092434,0
1562
+ 0.007411579,0.9925884,0
1563
+ 0.020384906,0.9796151,0
1564
+ 0.9753118,0.024688184,1
1565
+ 0.99156624,0.008433759,1
1566
+ 0.0124358265,0.98756415,0
1567
+ 0.99756587,0.0024341345,1
1568
+ 0.021502186,0.9784978,0
1569
+ 0.88626266,0.113737345,1
1570
+ 0.76407695,0.23592305,1
1571
+ 0.97689307,0.023106933,1
1572
+ 0.029756326,0.9702437,0
1573
+ 0.99370474,0.006295264,1
1574
+ 0.9981596,0.0018404126,1
1575
+ 0.99760675,0.0023932457,1
1576
+ 0.77559066,0.22440934,1
1577
+ 0.25312236,0.7468777,0
1578
+ 0.9960812,0.0039188266,1
1579
+ 0.6894145,0.3105855,1
1580
+ 0.013673185,0.9863268,0
1581
+ 0.9968112,0.003188789,1
1582
+ 0.9950671,0.0049328804,1
1583
+ 0.9900877,0.009912312,1
1584
+ 0.08846605,0.91153395,0
1585
+ 0.99676526,0.003234744,1
1586
+ 0.9624597,0.037540317,1
1587
+ 0.118853085,0.8811469,0
1588
+ 0.9684787,0.03152132,1
1589
+ 0.9979791,0.0020208955,1
1590
+ 0.033438563,0.96656144,0
1591
+ 0.0068343817,0.9931656,0
1592
+ 0.009964491,0.99003553,0
1593
+ 0.07983351,0.9201665,0
1594
+ 0.8975734,0.10242659,1
1595
+ 0.9919624,0.008037627,1
1596
+ 0.9954579,0.0045421124,1
1597
+ 0.9890939,0.0109061,1
1598
+ 0.94456416,0.055435836,1
1599
+ 0.97874373,0.021256268,1
1600
+ 0.6932526,0.30674738,1
1601
+ 0.0047641676,0.99523586,0
1602
+ 0.05521396,0.944786,0
1603
+ 0.040546075,0.95945394,0
1604
+ 0.99900335,0.0009966493,1
1605
+ 0.02840234,0.9715977,0
1606
+ 0.005851852,0.99414814,0
1607
+ 0.9069033,0.09309667,1
1608
+ 0.99039334,0.009606659,1
1609
+ 0.011555906,0.9884441,0
1610
+ 0.99448663,0.00551337,1
1611
+ 0.55790335,0.44209665,1
1612
+ 0.01775969,0.9822403,0
1613
+ 0.99652714,0.0034728646,1
1614
+ 0.010853602,0.9891464,0
1615
+ 0.98448193,0.015518069,1
1616
+ 0.99271894,0.007281065,1
1617
+ 0.0050981804,0.99490184,0
1618
+ 0.07518264,0.9248174,0
1619
+ 0.80737454,0.19262546,1
1620
+ 0.06079625,0.93920374,0
1621
+ 0.06043017,0.93956983,0
1622
+ 0.13720433,0.86279565,0
1623
+ 0.99843746,0.0015625358,1
1624
+ 0.020197738,0.97980225,0
1625
+ 0.9992161,0.0007839203,1
1626
+ 0.1079029,0.8920971,0
1627
+ 0.0089174,0.9910826,0
1628
+ 0.021822346,0.97817767,0
1629
+ 0.14984296,0.850157,0
1630
+ 0.0915699,0.9084301,0
1631
+ 0.0051686014,0.9948314,0
1632
+ 0.9131387,0.08686131,1
1633
+ 0.61736506,0.38263494,1
1634
+ 0.019656455,0.9803435,0
1635
+ 0.99917275,0.00082725286,1
1636
+ 0.9983675,0.0016325116,1
1637
+ 0.024805803,0.9751942,0
1638
+ 0.9956131,0.004386902,1
1639
+ 0.99850476,0.0014952421,1
1640
+ 0.998782,0.0012180209,1
1641
+ 0.90134686,0.09865314,1
1642
+ 0.015471149,0.98452884,0
1643
+ 0.030658495,0.9693415,0
1644
+ 0.031322084,0.96867794,0
1645
+ 0.9720267,0.027973294,1
1646
+ 0.07616925,0.92383075,0
1647
+ 0.014741097,0.9852589,0
1648
+ 0.099296935,0.9007031,0
1649
+ 0.02173558,0.9782644,0
1650
+ 0.025727566,0.97427243,0
1651
+ 0.96758133,0.03241867,1
1652
+ 0.8201276,0.1798724,1
1653
+ 0.010794832,0.9892052,0
1654
+ 0.030246936,0.9697531,0
1655
+ 0.008092318,0.99190766,0
1656
+ 0.020753695,0.9792463,0
1657
+ 0.573512,0.42648798,1
1658
+ 0.98178506,0.018214941,1
1659
+ 0.047036655,0.95296335,0
1660
+ 0.0050354614,0.99496454,0
1661
+ 0.004526257,0.99547374,0
1662
+ 0.99930215,0.0006978512,1
1663
+ 0.886365,0.113635,1
1664
+ 0.06334041,0.9366596,0
1665
+ 0.99786335,0.0021366477,1
1666
+ 0.11683577,0.8831642,0
1667
+ 0.99886996,0.0011300445,1
1668
+ 0.978961,0.02103901,1
1669
+ 0.012954098,0.9870459,0
1670
+ 0.9875871,0.012412906,1
1671
+ 0.003496894,0.9965031,0
1672
+ 0.023689218,0.9763108,0
1673
+ 0.0067625125,0.9932375,0
1674
+ 0.45915174,0.54084826,0
1675
+ 0.9920785,0.007921517,1
1676
+ 0.9994742,0.0005257726,1
1677
+ 0.0038445813,0.99615544,0
1678
+ 0.012535556,0.9874644,0
1679
+ 0.9855621,0.014437914,1
1680
+ 0.9986211,0.0013788939,1
1681
+ 0.086519144,0.9134809,0
1682
+ 0.98972744,0.0102725625,1
1683
+ 0.9705731,0.029426873,1
1684
+ 0.16202147,0.83797854,0
1685
+ 0.011884432,0.98811555,0
1686
+ 0.92736334,0.072636664,1
1687
+ 0.98239845,0.01760155,1
1688
+ 0.15861382,0.8413862,0
1689
+ 0.9331693,0.066830695,1
1690
+ 0.98442024,0.01557976,1
1691
+ 0.023287563,0.97671247,0
1692
+ 0.9178193,0.08218068,1
1693
+ 0.0054920265,0.99450797,0
1694
+ 0.0042315754,0.9957684,0
1695
+ 0.012958983,0.987041,0
1696
+ 0.7734977,0.2265023,1
1697
+ 0.049814884,0.9501851,0
1698
+ 0.013184402,0.9868156,0
1699
+ 0.067746624,0.93225336,0
1700
+ 0.24456331,0.75543666,0
1701
+ 0.9514273,0.04857272,1
1702
+ 0.99941635,0.0005836487,1
1703
+ 0.009484661,0.99051535,0
1704
+ 0.9987889,0.0012111068,1
1705
+ 0.04994472,0.9500553,0
1706
+ 0.99732876,0.0026712418,1
1707
+ 0.99280447,0.0071955323,1
1708
+ 0.07044411,0.9295559,0
1709
+ 0.042651065,0.95734894,0
1710
+ 0.011416795,0.9885832,0
1711
+ 0.99950373,0.0004962683,1
1712
+ 0.0680406,0.9319594,0
1713
+ 0.58770794,0.41229206,1
1714
+ 0.9983559,0.0016440749,1
1715
+ 0.9995726,0.0004274249,1
1716
+ 0.04024145,0.9597585,0
1717
+ 0.99895155,0.0010484457,1
1718
+ 0.0084200455,0.99157995,0
1719
+ 0.19051927,0.8094807,0
1720
+ 0.022887666,0.97711235,0
1721
+ 0.09325422,0.9067458,0
1722
+ 0.021836378,0.9781636,0
1723
+ 0.9988059,0.0011941195,1
1724
+ 0.052424587,0.9475754,0
1725
+ 0.025624903,0.97437507,0
1726
+ 0.7933257,0.20667428,1
1727
+ 0.011938736,0.98806125,0
1728
+ 0.9955056,0.0044944286,1
1729
+ 0.0073647965,0.9926352,0
1730
+ 0.050186045,0.94981396,0
1731
+ 0.20434926,0.7956507,0
1732
+ 0.0237731,0.9762269,0
1733
+ 0.47285873,0.5271413,0
1734
+ 0.017290493,0.9827095,0
1735
+ 0.021489127,0.97851086,0
1736
+ 0.054595277,0.9454047,0
1737
+ 0.23948076,0.76051927,0
1738
+ 0.010707215,0.9892928,0
1739
+ 0.9973345,0.0026655197,1
1740
+ 0.015417775,0.98458225,0
1741
+ 0.9183022,0.08169782,1
1742
+ 0.54850245,0.45149755,1
1743
+ 0.014334148,0.98566586,0
1744
+ 0.95610726,0.04389274,1
1745
+ 0.014037047,0.9859629,0
1746
+ 0.004278304,0.9957217,0
1747
+ 0.06173338,0.93826663,0
1748
+ 0.9991573,0.00084269047,1
1749
+ 0.012548784,0.9874512,0
1750
+ 0.99873155,0.0012684464,1
1751
+ 0.0074725593,0.9925274,0
1752
+ 0.015871348,0.98412865,0
1753
+ 0.92453617,0.07546383,1
1754
+ 0.83135206,0.16864794,1
1755
+ 0.26286265,0.7371373,0
1756
+ 0.028553113,0.9714469,0
1757
+ 0.021172833,0.9788272,0
1758
+ 0.113045596,0.8869544,0
1759
+ 0.9987753,0.0012246966,1
1760
+ 0.9946339,0.005366087,1
1761
+ 0.0060559004,0.9939441,0
1762
+ 0.42132226,0.5786778,0
1763
+ 0.014075918,0.98592407,0
1764
+ 0.99731666,0.0026833415,1
1765
+ 0.005444145,0.99455583,0
1766
+ 0.007352509,0.99264747,0
1767
+ 0.9960438,0.0039561987,1
1768
+ 0.024426164,0.97557384,0
1769
+ 0.0070765726,0.99292344,0
1770
+ 0.020988919,0.97901106,0
1771
+ 0.019429492,0.9805705,0
1772
+ 0.0057123387,0.99428767,0
1773
+ 0.99329597,0.0067040324,1
1774
+ 0.9993587,0.0006412864,1
1775
+ 0.99847776,0.001522243,1
1776
+ 0.9986659,0.0013340712,1
1777
+ 0.9957604,0.004239619,1
1778
+ 0.031886797,0.9681132,0
1779
+ 0.99574655,0.004253447,1
1780
+ 0.9415316,0.0584684,1
1781
+ 0.28048956,0.71951044,0
1782
+ 0.0043643955,0.9956356,0
1783
+ 0.9983614,0.0016385913,1
1784
+ 0.16831097,0.831689,0
1785
+ 0.9924442,0.007555783,1
1786
+ 0.014420041,0.98557997,0
1787
+ 0.99888676,0.001113236,1
1788
+ 0.3755411,0.6244589,0
1789
+ 0.015409193,0.9845908,0
1790
+ 0.99082303,0.0091769695,1
1791
+ 0.04530391,0.95469606,0
1792
+ 0.9994392,0.0005608201,1
1793
+ 0.038213592,0.9617864,0
1794
+ 0.0056062816,0.9943937,0
1795
+ 0.99951696,0.00048303604,1
1796
+ 0.9991761,0.000823915,1
1797
+ 0.98515505,0.014844954,1
1798
+ 0.0070461244,0.9929539,0
1799
+ 0.99940324,0.0005967617,1
1800
+ 0.9960348,0.003965199,1
1801
+ 0.9991653,0.00083470345,1
1802
+ 0.057069167,0.9429308,0
1803
+ 0.0138158025,0.9861842,0
1804
+ 0.004012408,0.9959876,0
1805
+ 0.991383,0.008616984,1
1806
+ 0.19775105,0.80224895,0
1807
+ 0.9566522,0.043347776,1
1808
+ 0.9809348,0.019065201,1
1809
+ 0.031833686,0.9681663,0
1810
+ 0.004440362,0.99555963,0
1811
+ 0.038287334,0.96171266,0
1812
+ 0.010088782,0.9899112,0
1813
+ 0.9989691,0.0010309219,1
1814
+ 0.4290963,0.57090366,0
1815
+ 0.015290285,0.98470974,0
1816
+ 0.9947272,0.0052728057,1
1817
+ 0.042639606,0.9573604,0
1818
+ 0.02243663,0.9775634,0
1819
+ 0.010076568,0.9899234,0
1820
+ 0.01916103,0.98083895,0
1821
+ 0.015725534,0.98427445,0
1822
+ 0.012475518,0.9875245,0
examples/AutoClsSST_SST-2/Transformer-Hybrid-Augmentation-Sentiment/res/output/test_prediction_epoch_2.csv ADDED
@@ -0,0 +1,1822 @@
1
+ prob_1,prob_0,prediction
2
+ 0.008024715,0.9919753,0
3
+ 0.033999242,0.96600074,0
4
+ 0.0025903261,0.9974097,0
5
+ 0.0060658306,0.99393415,0
6
+ 0.99957186,0.00042814016,1
7
+ 0.99955064,0.00044935942,1
8
+ 0.16471289,0.8352871,0
9
+ 0.9962871,0.0037128925,1
10
+ 0.04706095,0.95293903,0
11
+ 0.00585209,0.9941479,0
12
+ 0.5987578,0.4012422,1
13
+ 0.0026431028,0.9973569,0
14
+ 0.77245414,0.22754586,1
15
+ 0.96217763,0.037822366,1
16
+ 0.0018891735,0.99811083,0
17
+ 0.99759054,0.0024094582,1
18
+ 0.59230965,0.40769035,1
19
+ 0.009482256,0.99051774,0
20
+ 0.008205369,0.99179465,0
21
+ 0.4141345,0.5858655,0
22
+ 0.9993924,0.00060760975,1
23
+ 0.016137112,0.9838629,0
24
+ 0.90918106,0.09081894,1
25
+ 0.9995726,0.0004274249,1
26
+ 0.002226675,0.99777335,0
27
+ 0.00525245,0.9947476,0
28
+ 0.022973688,0.9770263,0
29
+ 0.9985833,0.0014166832,1
30
+ 0.010945197,0.9890548,0
31
+ 0.99885094,0.0011490583,1
32
+ 0.99382824,0.006171763,1
33
+ 0.0032408407,0.9967592,0
34
+ 0.9997002,0.00029981136,1
35
+ 0.9975326,0.0024673939,1
36
+ 0.99966264,0.0003373623,1
37
+ 0.9522827,0.047717273,1
38
+ 0.9937105,0.006289482,1
39
+ 0.8826388,0.11736119,1
40
+ 0.12782405,0.87217593,0
41
+ 0.0028826322,0.99711734,0
42
+ 0.9993905,0.0006095171,1
43
+ 0.99687684,0.0031231642,1
44
+ 0.0040410934,0.9959589,0
45
+ 0.076171614,0.92382836,0
46
+ 0.0013930652,0.9986069,0
47
+ 0.99966323,0.00033676624,1
48
+ 0.9957125,0.0042874813,1
49
+ 0.6279597,0.37204027,1
50
+ 0.00093862566,0.99906135,0
51
+ 0.34617144,0.65382856,0
52
+ 0.0016487576,0.9983512,0
53
+ 0.99955314,0.00044685602,1
54
+ 0.9928681,0.0071318746,1
55
+ 0.0015424386,0.99845755,0
56
+ 0.0019810258,0.998019,0
57
+ 0.9992393,0.0007606745,1
58
+ 0.013872191,0.9861278,0
59
+ 0.83484846,0.16515154,1
60
+ 0.10275519,0.8972448,0
61
+ 0.0347495,0.9652505,0
62
+ 0.99967694,0.00032305717,1
63
+ 0.009126423,0.9908736,0
64
+ 0.0024714547,0.99752855,0
65
+ 0.47708043,0.52291954,0
66
+ 0.9983954,0.0016046166,1
67
+ 0.93675685,0.06324315,1
68
+ 0.9995258,0.00047421455,1
69
+ 0.001423483,0.9985765,0
70
+ 0.002211218,0.9977888,0
71
+ 0.26197323,0.73802674,0
72
+ 0.9993405,0.0006595254,1
73
+ 0.014462363,0.98553765,0
74
+ 0.020691972,0.979308,0
75
+ 0.99809986,0.0019001365,1
76
+ 0.9845367,0.015463293,1
77
+ 0.8598121,0.14018792,1
78
+ 0.9989649,0.0010350943,1
79
+ 0.41990903,0.580091,0
80
+ 0.4299857,0.5700143,0
81
+ 0.0019925914,0.9980074,0
82
+ 0.004203653,0.9957963,0
83
+ 0.004465272,0.9955347,0
84
+ 0.0076424414,0.99235755,0
85
+ 0.0009788004,0.9990212,0
86
+ 0.27272886,0.72727114,0
87
+ 0.97492754,0.025072455,1
88
+ 0.18162753,0.8183725,0
89
+ 0.96468437,0.035315633,1
90
+ 0.0015413484,0.9984586,0
91
+ 0.0030971034,0.9969029,0
92
+ 0.99777955,0.0022204518,1
93
+ 0.999485,0.00051498413,1
94
+ 0.9958253,0.0041747093,1
95
+ 0.22298306,0.77701694,0
96
+ 0.9167152,0.083284795,1
97
+ 0.9989141,0.0010858774,1
98
+ 0.20446056,0.79553944,0
99
+ 0.00450336,0.99549663,0
100
+ 0.0010937038,0.9989063,0
101
+ 0.73686016,0.26313984,1
102
+ 0.99965775,0.00034224987,1
103
+ 0.9996351,0.00036489964,1
104
+ 0.009853591,0.9901464,0
105
+ 0.65051043,0.34948957,1
106
+ 0.01651445,0.9834856,0
107
+ 0.1259884,0.87401164,0
108
+ 0.9820462,0.017953813,1
109
+ 0.9771243,0.022875726,1
110
+ 0.99961436,0.00038564205,1
111
+ 0.054793928,0.94520605,0
112
+ 0.003146836,0.9968532,0
113
+ 0.009380343,0.99061966,0
114
+ 0.1123523,0.8876477,0
115
+ 0.025943281,0.9740567,0
116
+ 0.9954184,0.0045815706,1
117
+ 0.13047196,0.86952806,0
118
+ 0.00094570016,0.9990543,0
119
+ 0.7667618,0.23323822,1
120
+ 0.09564245,0.90435755,0
121
+ 0.9995803,0.0004196763,1
122
+ 0.98852485,0.011475146,1
123
+ 0.6803578,0.3196422,1
124
+ 0.9961152,0.0038847923,1
125
+ 0.9995695,0.00043052435,1
126
+ 0.9786003,0.021399677,1
127
+ 0.15331304,0.84668696,0
128
+ 0.12494081,0.8750592,0
129
+ 0.21749778,0.78250223,0
130
+ 0.03679082,0.96320915,0
131
+ 0.99633396,0.0036660433,1
132
+ 0.0028231763,0.9971768,0
133
+ 0.21104561,0.7889544,0
134
+ 0.012105625,0.98789436,0
135
+ 0.0056054546,0.99439454,0
136
+ 0.79204625,0.20795375,1
137
+ 0.0034631237,0.99653685,0
138
+ 0.8231542,0.17684579,1
139
+ 0.0033388992,0.9966611,0
140
+ 0.9945148,0.005485177,1
141
+ 0.4230918,0.57690823,0
142
+ 0.9995915,0.00040847063,1
143
+ 0.8423024,0.15769762,1
144
+ 0.30220547,0.69779456,0
145
+ 0.0046296455,0.9953703,0
146
+ 0.92944634,0.07055366,1
147
+ 0.009147973,0.990852,0
148
+ 0.98995996,0.010040045,1
149
+ 0.0022147547,0.9977853,0
150
+ 0.99816567,0.001834333,1
151
+ 0.016396482,0.98360354,0
152
+ 0.0029768478,0.99702317,0
153
+ 0.007664351,0.9923357,0
154
+ 0.04278965,0.95721036,0
155
+ 0.9904873,0.0095127225,1
156
+ 0.0015746558,0.99842536,0
157
+ 0.0023894885,0.9976105,0
158
+ 0.99907684,0.00092315674,1
159
+ 0.99939847,0.0006015301,1
160
+ 0.039997656,0.96000236,0
161
+ 0.9996269,0.00037312508,1
162
+ 0.0020892275,0.9979108,0
163
+ 0.8792901,0.120709896,1
164
+ 0.9996172,0.00038278103,1
165
+ 0.99898785,0.0010121465,1
166
+ 0.9992501,0.00074988604,1
167
+ 0.9939282,0.006071806,1
168
+ 0.71113926,0.28886074,1
169
+ 0.9483441,0.05165589,1
170
+ 0.9995865,0.00041347742,1
171
+ 0.0035143076,0.9964857,0
172
+ 0.020729022,0.979271,0
173
+ 0.99966383,0.0003361702,1
174
+ 0.0036127144,0.9963873,0
175
+ 0.004531421,0.99546856,0
176
+ 0.99937797,0.0006220341,1
177
+ 0.99938476,0.00061523914,1
178
+ 0.9893546,0.01064539,1
179
+ 0.9992441,0.0007559061,1
180
+ 0.9991442,0.0008558035,1
181
+ 0.006266671,0.99373335,0
182
+ 0.00202518,0.9979748,0
183
+ 0.989643,0.010357022,1
184
+ 0.9996246,0.00037539005,1
185
+ 0.023301337,0.97669864,0
186
+ 0.95232075,0.047679245,1
187
+ 0.9995136,0.0004863739,1
188
+ 0.0013074222,0.9986926,0
189
+ 0.98622906,0.013770938,1
190
+ 0.078209534,0.9217905,0
191
+ 0.97156423,0.028435767,1
192
+ 0.0031868445,0.9968132,0
193
+ 0.12838906,0.87161094,0
194
+ 0.9966697,0.0033302903,1
195
+ 0.99903333,0.0009666681,1
196
+ 0.0017869292,0.99821305,0
197
+ 0.997736,0.0022640228,1
198
+ 0.99973947,0.0002605319,1
199
+ 0.00259537,0.99740463,0
200
+ 0.99966955,0.00033044815,1
201
+ 0.0033419651,0.996658,0
202
+ 0.9996107,0.00038927794,1
203
+ 0.004029298,0.9959707,0
204
+ 0.0018029279,0.9981971,0
205
+ 0.0048389756,0.995161,0
206
+ 0.9996604,0.00033962727,1
207
+ 0.010074154,0.98992586,0
208
+ 0.60766125,0.39233875,1
209
+ 0.014938116,0.9850619,0
210
+ 0.9801475,0.019852519,1
211
+ 0.9917956,0.008204401,1
212
+ 0.99942845,0.00057154894,1
213
+ 0.99219596,0.007804036,1
214
+ 0.0065542217,0.99344575,0
215
+ 0.98685455,0.013145447,1
216
+ 0.00229501,0.997705,0
217
+ 0.0030769713,0.996923,0
218
+ 0.0014768329,0.9985232,0
219
+ 0.9994356,0.0005643964,1
220
+ 0.002749299,0.9972507,0
221
+ 0.02851177,0.97148824,0
222
+ 0.015150282,0.9848497,0
223
+ 0.76402277,0.23597723,1
224
+ 0.025530776,0.97446924,0
225
+ 0.5592919,0.4407081,1
226
+ 0.9996146,0.00038540363,1
227
+ 0.9878392,0.012160778,1
228
+ 0.9995821,0.00041788816,1
229
+ 0.99871325,0.0012867451,1
230
+ 0.6942037,0.30579633,1
231
+ 0.0011180148,0.998882,0
232
+ 0.0011713181,0.9988287,0
233
+ 0.0042095417,0.9957905,0
234
+ 0.99962854,0.00037145615,1
235
+ 0.9884509,0.011549115,1
236
+ 0.99953353,0.00046646595,1
237
+ 0.9993352,0.0006647706,1
238
+ 0.002077401,0.9979226,0
239
+ 0.99967384,0.00032615662,1
240
+ 0.9639292,0.036070824,1
241
+ 0.9936479,0.0063521266,1
242
+ 0.99873835,0.0012616515,1
243
+ 0.014353319,0.98564667,0
244
+ 0.99932456,0.00067543983,1
245
+ 0.4051097,0.5948903,0
246
+ 0.998871,0.0011289716,1
247
+ 0.61118484,0.38881516,1
248
+ 0.9994173,0.000582695,1
249
+ 0.99600023,0.0039997697,1
250
+ 0.0019472382,0.9980528,0
251
+ 0.9949951,0.005004883,1
252
+ 0.96911454,0.030885458,1
253
+ 0.9018772,0.098122776,1
254
+ 0.99962187,0.00037813187,1
255
+ 0.009466606,0.9905334,0
256
+ 0.00924364,0.99075633,0
257
+ 0.9995129,0.00048708916,1
258
+ 0.00095427607,0.9990457,0
259
+ 0.99944645,0.00055354834,1
260
+ 0.24796529,0.7520347,0
261
+ 0.99970716,0.00029283762,1
262
+ 0.00947588,0.9905241,0
263
+ 0.9994382,0.0005617738,1
264
+ 0.0061375075,0.9938625,0
265
+ 0.0010590985,0.9989409,0
266
+ 0.9995579,0.00044208765,1
267
+ 0.9990287,0.0009713173,1
268
+ 0.0012386893,0.9987613,0
269
+ 0.99953663,0.0004633665,1
270
+ 0.9997179,0.00028210878,1
271
+ 0.9980755,0.0019245148,1
272
+ 0.010735395,0.9892646,0
273
+ 0.018383985,0.981616,0
274
+ 0.002729235,0.99727076,0
275
+ 0.99898976,0.0010102391,1
276
+ 0.9983047,0.0016952753,1
277
+ 0.114580505,0.8854195,0
278
+ 0.011733315,0.9882667,0
279
+ 0.0026048955,0.9973951,0
280
+ 0.9992513,0.00074869394,1
281
+ 0.99082327,0.009176731,1
282
+ 0.9971374,0.0028625727,1
283
+ 0.007417766,0.99258226,0
284
+ 0.99959236,0.00040763617,1
285
+ 0.0041205,0.9958795,0
286
+ 0.4062097,0.5937903,0
287
+ 0.9643337,0.035666287,1
288
+ 0.9990363,0.0009636879,1
289
+ 0.999553,0.00044697523,1
290
+ 0.9967964,0.0032035708,1
291
+ 0.005301568,0.9946984,0
292
+ 0.0021561913,0.9978438,0
293
+ 0.99729615,0.0027038455,1
294
+ 0.013152168,0.9868478,0
295
+ 0.0034324946,0.9965675,0
296
+ 0.0033997954,0.9966002,0
297
+ 0.0033294982,0.9966705,0
298
+ 0.999724,0.0002760291,1
299
+ 0.9996197,0.00038027763,1
300
+ 0.99733776,0.0026622415,1
301
+ 0.0039785346,0.99602145,0
302
+ 0.9994037,0.00059628487,1
303
+ 0.8980345,0.10196549,1
304
+ 0.045035664,0.95496434,0
305
+ 0.99973756,0.00026243925,1
306
+ 0.010275879,0.9897241,0
307
+ 0.12106843,0.8789316,0
308
+ 0.9992549,0.00074511766,1
309
+ 0.06218942,0.9378106,0
310
+ 0.79012406,0.20987594,1
311
+ 0.9981267,0.0018733144,1
312
+ 0.0012296248,0.99877036,0
313
+ 0.9989987,0.0010012984,1
314
+ 0.003773294,0.9962267,0
315
+ 0.99949944,0.0005005598,1
316
+ 0.9900101,0.009989917,1
317
+ 0.034963556,0.96503645,0
318
+ 0.86880654,0.13119346,1
319
+ 0.990209,0.009791017,1
320
+ 0.99951696,0.00048303604,1
321
+ 0.966279,0.03372103,1
322
+ 0.99637836,0.0036216378,1
323
+ 0.9994678,0.0005322099,1
324
+ 0.004520011,0.99548,0
325
+ 0.06923845,0.9307616,0
326
+ 0.001642084,0.9983579,0
327
+ 0.0025899322,0.99741006,0
328
+ 0.0015910631,0.9984089,0
329
+ 0.9765954,0.023404598,1
330
+ 0.0062577156,0.9937423,0
331
+ 0.9992768,0.00072318316,1
332
+ 0.018202573,0.98179746,0
333
+ 0.98731947,0.012680531,1
334
+ 0.015694778,0.9843052,0
335
+ 0.99961275,0.00038725138,1
336
+ 0.93240625,0.06759375,1
337
+ 0.9979405,0.0020595193,1
338
+ 0.002916343,0.99708366,0
339
+ 0.99971193,0.00028806925,1
340
+ 0.10416392,0.89583606,0
341
+ 0.9995933,0.0004066825,1
342
+ 0.2826053,0.7173947,0
343
+ 0.99959797,0.00040203333,1
344
+ 0.026930593,0.9730694,0
345
+ 0.0018067618,0.99819326,0
346
+ 0.9825571,0.017442882,1
347
+ 0.003655664,0.9963443,0
348
+ 0.99952984,0.00047016144,1
349
+ 0.0551745,0.94482553,0
350
+ 0.999554,0.00044602156,1
351
+ 0.08125192,0.9187481,0
352
+ 0.9975044,0.0024955869,1
353
+ 0.9741684,0.02583158,1
354
+ 0.017156322,0.9828437,0
355
+ 0.004197504,0.9958025,0
356
+ 0.025144797,0.9748552,0
357
+ 0.006146458,0.99385357,0
358
+ 0.99957246,0.00042754412,1
359
+ 0.009399919,0.9906001,0
360
+ 0.0022997425,0.9977003,0
361
+ 0.999361,0.0006390214,1
362
+ 0.9672044,0.032795608,1
363
+ 0.31775513,0.6822449,0
364
+ 0.52403694,0.47596306,1
365
+ 0.9893351,0.01066488,1
366
+ 0.019175796,0.98082423,0
367
+ 0.99816847,0.0018315315,1
368
+ 0.052885197,0.9471148,0
369
+ 0.9852469,0.014753103,1
370
+ 0.030102089,0.9698979,0
371
+ 0.010927314,0.9890727,0
372
+ 0.103892006,0.896108,0
373
+ 0.14692806,0.8530719,0
374
+ 0.9995004,0.00049960613,1
375
+ 0.99957305,0.00042694807,1
376
+ 0.43370453,0.5662955,0
377
+ 0.99943095,0.00056904554,1
378
+ 0.032791056,0.9672089,0
379
+ 0.1062588,0.8937412,0
380
+ 0.16095723,0.8390428,0
381
+ 0.17240798,0.827592,0
382
+ 0.020257633,0.97974235,0
383
+ 0.99871814,0.0012818575,1
384
+ 0.0045654895,0.9954345,0
385
+ 0.06800951,0.9319905,0
386
+ 0.51394284,0.48605716,1
387
+ 0.99945396,0.00054603815,1
388
+ 0.0038471422,0.9961529,0
389
+ 0.9995882,0.0004118085,1
390
+ 0.0018014507,0.99819857,0
391
+ 0.9975793,0.0024207234,1
392
+ 0.0072135874,0.9927864,0
393
+ 0.9981653,0.0018346906,1
394
+ 0.98446506,0.015534937,1
395
+ 0.0053486666,0.9946513,0
396
+ 0.30941656,0.69058347,0
397
+ 0.0032396151,0.99676037,0
398
+ 0.00676594,0.99323404,0
399
+ 0.002532125,0.9974679,0
400
+ 0.021019995,0.97898,0
401
+ 0.02226556,0.97773445,0
402
+ 0.017584635,0.9824154,0
403
+ 0.9823284,0.017671585,1
404
+ 0.0020145471,0.9979855,0
405
+ 0.99829894,0.001701057,1
406
+ 0.034837928,0.9651621,0
407
+ 0.9991468,0.0008531809,1
408
+ 0.003163129,0.9968369,0
409
+ 0.0011191635,0.99888086,0
410
+ 0.999387,0.00061297417,1
411
+ 0.99967825,0.00032174587,1
412
+ 0.0019454669,0.9980545,0
413
+ 0.0029875604,0.99701244,0
414
+ 0.9402129,0.059787095,1
415
+ 0.96533114,0.034668863,1
416
+ 0.034187276,0.96581274,0
417
+ 0.06282095,0.937179,0
418
+ 0.010066985,0.989933,0
419
+ 0.99857044,0.0014295578,1
420
+ 0.0011396485,0.99886036,0
421
+ 0.9995925,0.00040751696,1
422
+ 0.99280155,0.007198453,1
423
+ 0.0038620331,0.996138,0
424
+ 0.99951804,0.00048196316,1
425
+ 0.0022008468,0.99779916,0
426
+ 0.0010992258,0.9989008,0
427
+ 0.8780898,0.121910214,1
428
+ 0.005208575,0.99479145,0
429
+ 0.022340855,0.97765917,0
430
+ 0.99968696,0.0003130436,1
431
+ 0.26925468,0.7307453,0
432
+ 0.0035494503,0.99645054,0
433
+ 0.9982084,0.0017915964,1
434
+ 0.5477689,0.4522311,1
435
+ 0.9964244,0.003575623,1
436
+ 0.9588053,0.041194677,1
437
+ 0.9963678,0.0036321878,1
438
+ 0.0048086075,0.9951914,0
439
+ 0.030646043,0.969354,0
440
+ 0.99377316,0.0062268376,1
441
+ 0.01089512,0.98910487,0
442
+ 0.0014585158,0.9985415,0
443
+ 0.0012942336,0.99870574,0
444
+ 0.0034871472,0.99651283,0
445
+ 0.00941354,0.99058646,0
446
+ 0.9994686,0.0005313754,1
447
+ 0.9990829,0.00091707706,1
448
+ 0.06773358,0.9322664,0
449
+ 0.99664307,0.0033569336,1
450
+ 0.7308917,0.2691083,1
451
+ 0.31471753,0.68528247,0
452
+ 0.0035734177,0.9964266,0
453
+ 0.0037977519,0.99620223,0
454
+ 0.002491341,0.99750865,0
455
+ 0.9785356,0.021464407,1
456
+ 0.99029213,0.009707868,1
457
+ 0.9996531,0.00034689903,1
458
+ 0.00677121,0.9932288,0
459
+ 0.99790716,0.0020928383,1
460
+ 0.9980592,0.0019407868,1
461
+ 0.9739973,0.026002705,1
462
+ 0.00335849,0.9966415,0
463
+ 0.9982419,0.0017580986,1
464
+ 0.95185643,0.048143566,1
465
+ 0.0017829754,0.99821705,0
466
+ 0.0354948,0.9645052,0
467
+ 0.039190933,0.96080905,0
468
+ 0.0260152,0.9739848,0
469
+ 0.9366281,0.0633719,1
470
+ 0.9997038,0.00029617548,1
471
+ 0.02558059,0.9744194,0
472
+ 0.016744507,0.9832555,0
473
+ 0.998569,0.0014309883,1
474
+ 0.9992101,0.00078988075,1
475
+ 0.97348624,0.026513755,1
476
+ 0.9996698,0.00033020973,1
477
+ 0.02436176,0.9756383,0
478
+ 0.99963236,0.00036764145,1
479
+ 0.3013801,0.6986199,0
480
+ 0.9975466,0.0024533868,1
481
+ 0.9995678,0.00043219328,1
482
+ 0.9996346,0.00036537647,1
483
+ 0.99930525,0.00069475174,1
484
+ 0.4654372,0.5345628,0
485
+ 0.03268851,0.9673115,0
486
+ 0.0014609888,0.99853903,0
487
+ 0.9996457,0.00035429,1
488
+ 0.9329594,0.06704062,1
489
+ 0.032331914,0.96766806,0
490
+ 0.57708037,0.42291963,1
491
+ 0.26241773,0.73758227,0
492
+ 0.9997205,0.00027948618,1
493
+ 0.9986487,0.0013512969,1
494
+ 0.0014351925,0.9985648,0
495
+ 0.99965036,0.00034964085,1
496
+ 0.99809796,0.0019020438,1
497
+ 0.999624,0.0003759861,1
498
+ 0.0049803318,0.9950197,0
499
+ 0.023156594,0.9768434,0
500
+ 0.07544494,0.92455506,0
501
+ 0.025991112,0.9740089,0
502
+ 0.9160779,0.08392209,1
503
+ 0.0073995325,0.99260044,0
504
+ 0.9996264,0.0003736019,1
505
+ 0.9844267,0.015573323,1
506
+ 0.9996264,0.0003736019,1
507
+ 0.970476,0.029524028,1
508
+ 0.9996834,0.00031661987,1
509
+ 0.0018276264,0.9981724,0
510
+ 0.6749761,0.3250239,1
511
+ 0.9994766,0.0005233884,1
512
+ 0.84444475,0.15555525,1
513
+ 0.0044214986,0.9955785,0
514
+ 0.9995889,0.00041109324,1
515
+ 0.008114209,0.9918858,0
516
+ 0.11637978,0.8836202,0
517
+ 0.014024183,0.9859758,0
518
+ 0.17525955,0.82474047,0
519
+ 0.9996531,0.00034689903,1
520
+ 0.0023516272,0.99764836,0
521
+ 0.0013613052,0.9986387,0
522
+ 0.0048928712,0.9951071,0
523
+ 0.0009916603,0.99900836,0
524
+ 0.017241783,0.9827582,0
525
+ 0.011088401,0.9889116,0
526
+ 0.7684132,0.23158681,1
527
+ 0.05099426,0.9490057,0
528
+ 0.9557131,0.044286907,1
529
+ 0.1972989,0.8027011,0
530
+ 0.033156037,0.96684396,0
531
+ 0.06144287,0.93855715,0
532
+ 0.0064277165,0.9935723,0
533
+ 0.9988702,0.001129806,1
534
+ 0.9987936,0.001206398,1
535
+ 0.999099,0.0009009838,1
536
+ 0.99292374,0.0070762634,1
537
+ 0.0036548474,0.99634516,0
538
+ 0.9996326,0.00036740303,1
539
+ 0.9839516,0.016048372,1
540
+ 0.10006611,0.8999339,0
541
+ 0.99901664,0.0009833574,1
542
+ 0.0037176656,0.99628234,0
543
+ 0.9995809,0.00041908026,1
544
+ 0.039251752,0.96074826,0
545
+ 0.992975,0.0070250034,1
546
+ 0.98804957,0.011950433,1
547
+ 0.0068005132,0.99319947,0
548
+ 0.21867754,0.7813225,0
549
+ 0.9691012,0.03089881,1
550
+ 0.9714517,0.0285483,1
551
+ 0.9995018,0.0004981756,1
552
+ 0.026693275,0.9733067,0
553
+ 0.0018972118,0.9981028,0
554
+ 0.005699472,0.99430054,0
555
+ 0.9993678,0.00063222647,1
556
+ 0.99078345,0.009216547,1
557
+ 0.24928686,0.7507131,0
558
+ 0.9971214,0.0028786063,1
559
+ 0.094992235,0.9050078,0
560
+ 0.0030393691,0.99696064,0
561
+ 0.4448755,0.5551245,0
562
+ 0.0049384376,0.9950616,0
563
+ 0.99963295,0.0003670454,1
564
+ 0.9970221,0.0029779077,1
565
+ 0.99938893,0.0006110668,1
566
+ 0.038718183,0.96128184,0
567
+ 0.99690944,0.0030905604,1
568
+ 0.7950698,0.20493019,1
569
+ 0.007947767,0.99205226,0
570
+ 0.99523205,0.0047679543,1
571
+ 0.99362916,0.0063708425,1
572
+ 0.999482,0.00051802397,1
573
+ 0.99863017,0.001369834,1
574
+ 0.9803904,0.01960957,1
575
+ 0.044861794,0.9551382,0
576
+ 0.0017195629,0.99828047,0
577
+ 0.1544516,0.8455484,0
578
+ 0.0069754324,0.9930246,0
579
+ 0.99911875,0.0008812547,1
580
+ 0.9781778,0.021822214,1
581
+ 0.67327523,0.32672477,1
582
+ 0.99819475,0.0018052459,1
583
+ 0.9996728,0.0003272295,1
584
+ 0.9993063,0.00069367886,1
585
+ 0.9673962,0.0326038,1
586
+ 0.983678,0.016322017,1
587
+ 0.9995521,0.0004479289,1
588
+ 0.0044595306,0.99554044,0
589
+ 0.0049130204,0.99508697,0
590
+ 0.004092734,0.99590725,0
591
+ 0.9994885,0.00051152706,1
592
+ 0.0021635334,0.9978365,0
593
+ 0.04759155,0.95240843,0
594
+ 0.011356402,0.9886436,0
595
+ 0.99876714,0.0012328625,1
596
+ 0.97916,0.02083999,1
597
+ 0.0015812938,0.9984187,0
598
+ 0.99913377,0.0008662343,1
599
+ 0.0069352053,0.9930648,0
600
+ 0.0020668574,0.99793315,0
601
+ 0.017407484,0.9825925,0
602
+ 0.99361163,0.006388366,1
603
+ 0.99648714,0.0035128593,1
604
+ 0.97352886,0.026471138,1
605
+ 0.08205922,0.9179408,0
606
+ 0.0013039161,0.9986961,0
607
+ 0.004427178,0.9955728,0
608
+ 0.407956,0.592044,0
609
+ 0.9605343,0.039465725,1
610
+ 0.0022121188,0.9977879,0
611
+ 0.10666156,0.89333844,0
612
+ 0.0016684684,0.99833155,0
613
+ 0.09434663,0.90565336,0
614
+ 0.969415,0.030584991,1
615
+ 0.0027451862,0.9972548,0
616
+ 0.95455486,0.045445144,1
617
+ 0.004087955,0.995912,0
618
+ 0.9976845,0.0023155212,1
619
+ 0.26126015,0.73873985,0
620
+ 0.96111554,0.03888446,1
621
+ 0.99825794,0.001742065,1
622
+ 0.9977399,0.002260089,1
623
+ 0.0023532272,0.99764675,0
624
+ 0.9247228,0.07527721,1
625
+ 0.7478041,0.2521959,1
626
+ 0.036233276,0.9637667,0
627
+ 0.0029513133,0.9970487,0
628
+ 0.99947685,0.00052314997,1
629
+ 0.9996666,0.00033342838,1
630
+ 0.7425078,0.25749218,1
631
+ 0.9772031,0.022796929,1
632
+ 0.8655796,0.1344204,1
633
+ 0.9995788,0.00042122602,1
634
+ 0.9996842,0.0003157854,1
635
+ 0.9104866,0.08951342,1
636
+ 0.99956423,0.00043576956,1
637
+ 0.007057377,0.99294263,0
638
+ 0.004149575,0.99585044,0
639
+ 0.026695607,0.9733044,0
640
+ 0.7300132,0.2699868,1
641
+ 0.9956255,0.004374504,1
642
+ 0.99942833,0.00057166815,1
643
+ 0.9995958,0.0004041791,1
644
+ 0.07480477,0.9251952,0
645
+ 0.011391754,0.98860824,0
646
+ 0.4421149,0.5578851,0
647
+ 0.0012013952,0.9987986,0
648
+ 0.006871821,0.9931282,0
649
+ 0.391892,0.60810804,0
650
+ 0.99964845,0.0003515482,1
651
+ 0.41236055,0.58763945,0
652
+ 0.9995777,0.0004222989,1
653
+ 0.9513238,0.048676193,1
654
+ 0.02247429,0.9775257,0
655
+ 0.99961203,0.00038796663,1
656
+ 0.9468533,0.05314672,1
657
+ 0.98439837,0.015601635,1
658
+ 0.0031047892,0.9968952,0
659
+ 0.958701,0.041298985,1
660
+ 0.99885774,0.0011422634,1
661
+ 0.0076476075,0.99235237,0
662
+ 0.0014872695,0.99851274,0
663
+ 0.98873585,0.011264145,1
664
+ 0.0009220427,0.999078,0
665
+ 0.99963975,0.00036025047,1
666
+ 0.9990791,0.00092089176,1
667
+ 0.9945247,0.0054752827,1
668
+ 0.97406137,0.02593863,1
669
+ 0.99960524,0.00039476156,1
670
+ 0.013597177,0.9864028,0
671
+ 0.0015178242,0.99848217,0
672
+ 0.69138926,0.30861074,1
673
+ 0.057320658,0.94267935,0
674
+ 0.9984805,0.0015195012,1
675
+ 0.005375191,0.9946248,0
676
+ 0.0010909572,0.99890906,0
677
+ 0.015485921,0.98451406,0
678
+ 0.10363577,0.8963642,0
679
+ 0.28017968,0.7198203,0
680
+ 0.0009997106,0.9990003,0
681
+ 0.0046229176,0.99537706,0
682
+ 0.19204369,0.80795634,0
683
+ 0.025717983,0.974282,0
684
+ 0.99283326,0.0071667433,1
685
+ 0.0011783387,0.9988217,0
686
+ 0.0054170275,0.99458295,0
687
+ 0.0059404606,0.99405956,0
688
+ 0.009232328,0.99076766,0
689
+ 0.00088326714,0.9991167,0
690
+ 0.9866468,0.013353229,1
691
+ 0.0064033656,0.9935966,0
692
+ 0.33239034,0.6676097,0
693
+ 0.0036843626,0.99631566,0
694
+ 0.0036104084,0.99638957,0
695
+ 0.9776989,0.022301078,1
696
+ 0.9980215,0.0019785166,1
697
+ 0.99966586,0.00033414364,1
698
+ 0.0032485349,0.9967515,0
699
+ 0.99948406,0.0005159378,1
700
+ 0.9993179,0.00068211555,1
701
+ 0.98860115,0.011398852,1
702
+ 0.0134891495,0.9865109,0
703
+ 0.99823076,0.0017692447,1
704
+ 0.99951744,0.0004825592,1
705
+ 0.99873513,0.0012648702,1
706
+ 0.9995284,0.00047159195,1
707
+ 0.9996455,0.00035452843,1
708
+ 0.9980975,0.0019025207,1
709
+ 0.99729735,0.0027026534,1
710
+ 0.0013343081,0.9986657,0
711
+ 0.00078171206,0.9992183,0
712
+ 0.9892482,0.010751784,1
713
+ 0.8806048,0.1193952,1
714
+ 0.5932761,0.40672392,1
715
+ 0.99789304,0.0021069646,1
716
+ 0.98013544,0.01986456,1
717
+ 0.8555144,0.1444856,1
718
+ 0.99953675,0.0004632473,1
719
+ 0.046722386,0.9532776,0
720
+ 0.029049749,0.97095025,0
721
+ 0.01705941,0.9829406,0
722
+ 0.015308539,0.98469144,0
723
+ 0.009469679,0.9905303,0
724
+ 0.9625793,0.03742069,1
725
+ 0.2549614,0.7450386,0
726
+ 0.17780143,0.82219857,0
727
+ 0.9995167,0.00048327446,1
728
+ 0.9871432,0.0128567815,1
729
+ 0.9474326,0.052567422,1
730
+ 0.029327566,0.9706724,0
731
+ 0.0984518,0.9015482,0
732
+ 0.0034802146,0.9965198,0
733
+ 0.0055489377,0.99445105,0
734
+ 0.002309628,0.9976904,0
735
+ 0.99849856,0.001501441,1
736
+ 0.065003425,0.9349966,0
737
+ 0.9995517,0.00044828653,1
738
+ 0.9988361,0.0011638999,1
739
+ 0.8338294,0.1661706,1
740
+ 0.9987832,0.0012168288,1
741
+ 0.9993198,0.0006802082,1
742
+ 0.3442681,0.6557319,0
743
+ 0.99929523,0.0007047653,1
744
+ 0.99969494,0.00030505657,1
745
+ 0.9663459,0.033654094,1
746
+ 0.61687434,0.38312566,1
747
+ 0.043087162,0.9569128,0
748
+ 0.99770147,0.002298534,1
749
+ 0.9995296,0.00047039986,1
750
+ 0.006390712,0.9936093,0
751
+ 0.008467348,0.9915326,0
752
+ 0.9996506,0.00034940243,1
753
+ 0.9872996,0.012700379,1
754
+ 0.4091638,0.59083617,0
755
+ 0.97830015,0.021699846,1
756
+ 0.0021611904,0.9978388,0
757
+ 0.25428447,0.7457155,0
758
+ 0.7072846,0.29271537,1
759
+ 0.9984754,0.0015246272,1
760
+ 0.36703017,0.63296986,0
761
+ 0.9997025,0.00029748678,1
762
+ 0.008219618,0.9917804,0
763
+ 0.011267997,0.988732,0
764
+ 0.94692117,0.05307883,1
765
+ 0.99936944,0.00063055754,1
766
+ 0.416858,0.58314204,0
767
+ 0.0015334942,0.9984665,0
768
+ 0.010142048,0.989858,0
769
+ 0.9943851,0.0056148767,1
770
+ 0.7926817,0.2073183,1
771
+ 0.9031099,0.09689009,1
772
+ 0.012353224,0.98764676,0
773
+ 0.9943125,0.005687475,1
774
+ 0.99853325,0.0014667511,1
775
+ 0.994635,0.005365014,1
776
+ 0.99487793,0.0051220655,1
777
+ 0.9992243,0.00077569485,1
778
+ 0.02781178,0.97218823,0
779
+ 0.40638718,0.5936128,0
780
+ 0.07849715,0.9215028,0
781
+ 0.01648748,0.9835125,0
782
+ 0.048996612,0.9510034,0
783
+ 0.00728767,0.9927123,0
784
+ 0.006706604,0.9932934,0
785
+ 0.06814605,0.93185395,0
786
+ 0.018145913,0.9818541,0
787
+ 0.999567,0.00043302774,1
788
+ 0.99788314,0.002116859,1
789
+ 0.0026534607,0.9973465,0
790
+ 0.9992244,0.00077557564,1
791
+ 0.42804682,0.5719532,0
792
+ 0.99770087,0.00229913,1
793
+ 0.19632193,0.80367804,0
794
+ 0.9953793,0.0046206713,1
795
+ 0.007813619,0.99218637,0
796
+ 0.0007310288,0.99926895,0
797
+ 0.31305096,0.686949,0
798
+ 0.035585117,0.9644149,0
799
+ 0.9969067,0.0030933022,1
800
+ 0.0012181986,0.9987818,0
801
+ 0.0069421385,0.99305785,0
802
+ 0.01193047,0.98806953,0
803
+ 0.9996014,0.00039857626,1
804
+ 0.9996247,0.00037527084,1
805
+ 0.0042599356,0.99574006,0
806
+ 0.5610344,0.43896562,1
807
+ 0.0027536282,0.9972464,0
808
+ 0.017462518,0.9825375,0
809
+ 0.9987909,0.0012090802,1
810
+ 0.009180291,0.9908197,0
811
+ 0.95556694,0.044433057,1
812
+ 0.063967526,0.9360325,0
813
+ 0.3866037,0.6133963,0
814
+ 0.022595044,0.97740495,0
815
+ 0.9995704,0.00042957067,1
816
+ 0.9994986,0.0005013943,1
817
+ 0.99851507,0.0014849305,1
818
+ 0.0032553014,0.9967447,0
819
+ 0.004814238,0.99518573,0
820
+ 0.010095393,0.9899046,0
821
+ 0.99936014,0.00063985586,1
822
+ 0.010375738,0.98962426,0
823
+ 0.00250223,0.9974978,0
824
+ 0.0010809761,0.998919,0
825
+ 0.64924383,0.35075617,1
826
+ 0.013929181,0.9860708,0
827
+ 0.91326743,0.08673257,1
828
+ 0.008155369,0.99184465,0
829
+ 0.00282503,0.997175,0
830
+ 0.8682339,0.13176608,1
831
+ 0.99209386,0.007906139,1
832
+ 0.026378741,0.97362125,0
833
+ 0.0021287256,0.9978713,0
834
+ 0.0056213904,0.9943786,0
835
+ 0.0037965272,0.9962035,0
836
+ 0.022128407,0.9778716,0
837
+ 0.004521801,0.9954782,0
838
+ 0.998231,0.0017690063,1
839
+ 0.0020805101,0.9979195,0
840
+ 0.9788005,0.021199524,1
841
+ 0.004045166,0.9959548,0
842
+ 0.99972886,0.00027114153,1
843
+ 0.1521876,0.8478124,0
844
+ 0.99593425,0.004065752,1
845
+ 0.34138635,0.6586137,0
846
+ 0.0010261744,0.99897385,0
847
+ 0.9815207,0.018479288,1
848
+ 0.00949593,0.9905041,0
849
+ 0.9965564,0.0034435987,1
850
+ 0.70186126,0.29813874,1
851
+ 0.113967925,0.8860321,0
852
+ 0.77701527,0.22298473,1
853
+ 0.19588873,0.80411124,0
854
+ 0.004597838,0.99540216,0
855
+ 0.6042087,0.3957913,1
856
+ 0.0034275898,0.99657243,0
857
+ 0.9996283,0.00037169456,1
858
+ 0.63604623,0.36395377,1
859
+ 0.0024783388,0.99752164,0
860
+ 0.0028105555,0.99718946,0
861
+ 0.009743954,0.9902561,0
862
+ 0.999316,0.0006840229,1
863
+ 0.9618739,0.03812611,1
864
+ 0.015731536,0.9842685,0
865
+ 0.997047,0.002952993,1
866
+ 0.9917231,0.00827688,1
867
+ 0.999464,0.00053602457,1
868
+ 0.9773626,0.022637427,1
869
+ 0.9556757,0.04432428,1
870
+ 0.0014355642,0.9985644,0
871
+ 0.017239327,0.98276067,0
872
+ 0.18996261,0.8100374,0
873
+ 0.049572118,0.9504279,0
874
+ 0.9994351,0.0005648732,1
875
+ 0.9996207,0.00037932396,1
876
+ 0.9994692,0.00053077936,1
877
+ 0.99944204,0.0005579591,1
878
+ 0.9996251,0.00037491322,1
879
+ 0.9991635,0.0008364916,1
880
+ 0.002465771,0.9975342,0
881
+ 0.9997476,0.00025242567,1
882
+ 0.002994984,0.99700505,0
883
+ 0.088719636,0.9112804,0
884
+ 0.00396041,0.99603957,0
885
+ 0.998599,0.0014010072,1
886
+ 0.98077214,0.019227862,1
887
+ 0.007507816,0.9924922,0
888
+ 0.9789281,0.02107191,1
889
+ 0.9991793,0.00082069635,1
890
+ 0.99954826,0.0004517436,1
891
+ 0.99422896,0.005771041,1
892
+ 0.0033191107,0.9966809,0
893
+ 0.5738977,0.42610228,1
894
+ 0.0011540877,0.99884593,0
895
+ 0.9266318,0.07336819,1
896
+ 0.99969995,0.00030004978,1
897
+ 0.9997093,0.00029069185,1
898
+ 0.9698108,0.030189216,1
899
+ 0.99899954,0.001000464,1
900
+ 0.99974054,0.00025945902,1
901
+ 0.99498284,0.0050171614,1
902
+ 0.0019976126,0.9980024,0
903
+ 0.5745932,0.4254068,1
904
+ 0.93890846,0.061091542,1
905
+ 0.99632,0.0036799908,1
906
+ 0.8413946,0.1586054,1
907
+ 0.004941065,0.99505895,0
908
+ 0.34980887,0.6501911,0
909
+ 0.055885654,0.9441143,0
910
+ 0.001840426,0.9981596,0
911
+ 0.063957416,0.9360426,0
912
+ 0.03777573,0.96222425,0
913
+ 0.99955815,0.00044184923,1
914
+ 0.0029776304,0.9970224,0
915
+ 0.34429103,0.65570897,0
916
+ 0.6439165,0.3560835,1
917
+ 0.9681225,0.031877518,1
918
+ 0.9705635,0.029436529,1
919
+ 0.9994497,0.0005503297,1
920
+ 0.9888681,0.011131883,1
921
+ 0.9973061,0.0026938915,1
922
+ 0.07972264,0.92027736,0
923
+ 0.002412421,0.99758756,0
924
+ 0.9706999,0.029300094,1
925
+ 0.04619435,0.9538056,0
926
+ 0.0013990959,0.9986009,0
927
+ 0.023491694,0.9765083,0
928
+ 0.031752598,0.9682474,0
929
+ 0.7393588,0.26064122,1
930
+ 0.9898649,0.010135114,1
931
+ 0.05718207,0.9428179,0
932
+ 0.004899051,0.995101,0
933
+ 0.0024078062,0.9975922,0
934
+ 0.2859047,0.7140953,0
935
+ 0.8539173,0.1460827,1
936
+ 0.031862915,0.9681371,0
937
+ 0.0014795412,0.99852043,0
938
+ 0.0032204143,0.99677956,0
939
+ 0.9975879,0.0024120808,1
940
+ 0.99963045,0.0003695488,1
941
+ 0.0019866885,0.9980133,0
942
+ 0.004976888,0.99502313,0
943
+ 0.004723213,0.9952768,0
944
+ 0.6117913,0.3882087,1
945
+ 0.0018723819,0.99812764,0
946
+ 0.8460553,0.15394467,1
947
+ 0.01628444,0.98371553,0
948
+ 0.01619181,0.98380816,0
949
+ 0.012518686,0.9874813,0
950
+ 0.88967174,0.11032826,1
951
+ 0.99242425,0.0075757504,1
952
+ 0.83529323,0.16470677,1
953
+ 0.003095187,0.9969048,0
954
+ 0.020491756,0.9795082,0
955
+ 0.99966383,0.0003361702,1
956
+ 0.9985677,0.0014322996,1
957
+ 0.041374166,0.95862585,0
958
+ 0.0542903,0.9457097,0
959
+ 0.00894376,0.99105626,0
960
+ 0.23739028,0.7626097,0
961
+ 0.9499496,0.050050378,1
962
+ 0.059399553,0.94060045,0
963
+ 0.0012133729,0.9987866,0
964
+ 0.004152076,0.99584794,0
965
+ 0.005733377,0.9942666,0
966
+ 0.0044340687,0.99556595,0
967
+ 0.0056635104,0.9943365,0
968
+ 0.99948704,0.0005129576,1
969
+ 0.9996141,0.00038588047,1
970
+ 0.9533444,0.046655595,1
971
+ 0.9885698,0.011430204,1
972
+ 0.9979911,0.002008915,1
973
+ 0.9733104,0.026689589,1
974
+ 0.002265488,0.9977345,0
975
+ 0.0029692505,0.99703074,0
976
+ 0.41565698,0.584343,0
977
+ 0.99887604,0.0011239648,1
978
+ 0.9817131,0.018286884,1
979
+ 0.0065196976,0.9934803,0
980
+ 0.010362903,0.9896371,0
981
+ 0.0009618355,0.99903816,0
982
+ 0.007063865,0.99293613,0
983
+ 0.0023501497,0.99764985,0
984
+ 0.99951184,0.00048816204,1
985
+ 0.9976413,0.002358675,1
986
+ 0.0039149723,0.99608505,0
987
+ 0.99842983,0.0015701652,1
988
+ 0.9983327,0.0016673207,1
989
+ 0.016547713,0.98345226,0
990
+ 0.08896693,0.9110331,0
991
+ 0.9984301,0.0015699267,1
992
+ 0.0026862537,0.99731374,0
993
+ 0.4920763,0.5079237,0
994
+ 0.8022609,0.19773912,1
995
+ 0.107578926,0.89242107,0
996
+ 0.29686928,0.7031307,0
997
+ 0.0040904405,0.9959096,0
998
+ 0.38031778,0.6196822,0
999
+ 0.10719683,0.8928032,0
1000
+ 0.025295923,0.9747041,0
1001
+ 0.7578574,0.24214262,1
1002
+ 0.00093312084,0.9990669,0
1003
+ 0.0022984277,0.9977016,0
1004
+ 0.9125145,0.08748549,1
1005
+ 0.9976572,0.0023428202,1
1006
+ 0.9995987,0.00040131807,1
1007
+ 0.99948347,0.00051653385,1
1008
+ 0.50495,0.49505,1
1009
+ 0.0020028213,0.99799716,0
1010
+ 0.99851614,0.0014838576,1
1011
+ 0.004226973,0.995773,0
1012
+ 0.9995148,0.0004851818,1
1013
+ 0.0022908365,0.99770916,0
1014
+ 0.9996407,0.0003592968,1
1015
+ 0.0094337305,0.99056625,0
1016
+ 0.051959947,0.94804007,0
1017
+ 0.100972965,0.89902705,0
1018
+ 0.99316746,0.00683254,1
1019
+ 0.99966455,0.00033545494,1
1020
+ 0.96641433,0.033585668,1
1021
+ 0.9984561,0.0015438795,1
1022
+ 0.9958217,0.0041782856,1
1023
+ 0.97992045,0.020079553,1
1024
+ 0.99671316,0.0032868385,1
1025
+ 0.003655219,0.9963448,0
1026
+ 0.99861085,0.0013891459,1
1027
+ 0.5625484,0.4374516,1
1028
+ 0.0016890721,0.9983109,0
1029
+ 0.012691243,0.98730874,0
1030
+ 0.005846348,0.9941537,0
1031
+ 0.0013850372,0.99861497,0
1032
+ 0.0009262542,0.99907374,0
1033
+ 0.9997067,0.00029331446,1
1034
+ 0.027922938,0.9720771,0
1035
+ 0.9996723,0.00032770634,1
1036
+ 0.9997172,0.00028282404,1
1037
+ 0.6442366,0.35576338,1
1038
+ 0.998949,0.0010510087,1
1039
+ 0.99803144,0.0019685626,1
1040
+ 0.010431405,0.9895686,0
1041
+ 0.99966574,0.00033426285,1
1042
+ 0.032135025,0.967865,0
1043
+ 0.85126805,0.14873195,1
1044
+ 0.9956416,0.004358411,1
1045
+ 0.9962423,0.0037577152,1
1046
+ 0.01260141,0.98739856,0
1047
+ 0.9993387,0.00066131353,1
1048
+ 0.04168818,0.9583118,0
1049
+ 0.9893092,0.010690808,1
1050
+ 0.9988865,0.0011134744,1
1051
+ 0.010328503,0.98967147,0
1052
+ 0.9957491,0.004250884,1
1053
+ 0.9024997,0.097500324,1
1054
+ 0.002249894,0.9977501,0
1055
+ 0.144724,0.855276,0
1056
+ 0.99876106,0.0012389421,1
1057
+ 0.99359965,0.0064003468,1
1058
+ 0.011983345,0.98801666,0
1059
+ 0.0019636978,0.9980363,0
1060
+ 0.03982107,0.9601789,0
1061
+ 0.0012464254,0.99875355,0
1062
+ 0.9298671,0.07013291,1
1063
+ 0.0016311621,0.99836886,0
1064
+ 0.007126019,0.99287397,0
1065
+ 0.99957484,0.00042515993,1
1066
+ 0.99766326,0.0023367405,1
1067
+ 0.23659724,0.76340276,0
1068
+ 0.97749287,0.022507131,1
1069
+ 0.6562963,0.3437037,1
1070
+ 0.91976076,0.08023924,1
1071
+ 0.99932146,0.0006785393,1
1072
+ 0.0021506352,0.99784935,0
1073
+ 0.0016553648,0.99834466,0
1074
+ 0.9997229,0.000277102,1
1075
+ 0.0034490281,0.996551,0
1076
+ 0.9230728,0.076927185,1
1077
+ 0.9996667,0.00033330917,1
1078
+ 0.0017702382,0.99822974,0
1079
+ 0.9984824,0.0015175939,1
1080
+ 0.974391,0.025609016,1
1081
+ 0.99969983,0.000300169,1
1082
+ 0.0020211067,0.99797887,0
1083
+ 0.008136614,0.99186337,0
1084
+ 0.06361628,0.9363837,0
1085
+ 0.9946912,0.005308807,1
1086
+ 0.98451614,0.015483856,1
1087
+ 0.9992894,0.0007106066,1
1088
+ 0.99921274,0.00078725815,1
1089
+ 0.99930084,0.0006991625,1
1090
+ 0.99955446,0.00044554472,1
1091
+ 0.013202032,0.986798,0
1092
+ 0.0008121275,0.9991879,0
1093
+ 0.038518712,0.9614813,0
1094
+ 0.0010893516,0.99891067,0
1095
+ 0.9994326,0.0005673766,1
1096
+ 0.69146097,0.30853903,1
1097
+ 0.026844576,0.97315544,0
1098
+ 0.0022876172,0.9977124,0
1099
+ 0.32596463,0.6740354,0
1100
+ 0.91840726,0.08159274,1
1101
+ 0.81475776,0.18524224,1
1102
+ 0.0030581264,0.99694186,0
1103
+ 0.0012270099,0.998773,0
1104
+ 0.9908652,0.009134829,1
1105
+ 0.21851178,0.78148824,0
1106
+ 0.014970546,0.98502946,0
1107
+ 0.007663394,0.99233663,0
1108
+ 0.9986833,0.0013167262,1
1109
+ 0.016385201,0.9836148,0
1110
+ 0.9963666,0.00363338,1
1111
+ 0.0015946553,0.99840534,0
1112
+ 0.049851425,0.9501486,0
1113
+ 0.3186957,0.68130434,0
1114
+ 0.99797267,0.0020273328,1
1115
+ 0.9974722,0.0025277734,1
1116
+ 0.0013546338,0.99864537,0
1117
+ 0.059121832,0.94087815,0
1118
+ 0.033603087,0.9663969,0
1119
+ 0.027140869,0.97285914,0
1120
+ 0.2319708,0.7680292,0
1121
+ 0.005793132,0.99420685,0
1122
+ 0.99917704,0.00082296133,1
1123
+ 0.008222959,0.99177706,0
1124
+ 0.9975922,0.0024077892,1
1125
+ 0.023219164,0.97678083,0
1126
+ 0.9996476,0.00035238266,1
1127
+ 0.9985669,0.0014330745,1
1128
+ 0.3595961,0.64040387,0
1129
+ 0.0019907297,0.99800926,0
1130
+ 0.9992471,0.0007529259,1
1131
+ 0.002476532,0.9975235,0
1132
+ 0.30555892,0.6944411,0
1133
+ 0.073036134,0.92696387,0
1134
+ 0.0033976436,0.99660236,0
1135
+ 0.9118526,0.0881474,1
1136
+ 0.009667363,0.99033266,0
1137
+ 0.0028820932,0.9971179,0
1138
+ 0.0269562,0.9730438,0
1139
+ 0.38539428,0.6146057,0
1140
+ 0.0015605742,0.99843943,0
1141
+ 0.0015145009,0.9984855,0
1142
+ 0.49441293,0.5055871,0
1143
+ 0.0012432414,0.99875677,0
1144
+ 0.012725895,0.9872741,0
1145
+ 0.0014967809,0.9985032,0
1146
+ 0.0022543557,0.99774563,0
1147
+ 0.0036533056,0.9963467,0
1148
+ 0.79465616,0.20534384,1
1149
+ 0.99945706,0.0005429387,1
1150
+ 0.0015484457,0.99845153,0
1151
+ 0.23672166,0.76327837,0
1152
+ 0.99933714,0.00066286325,1
1153
+ 0.9992545,0.0007454753,1
1154
+ 0.018010784,0.9819892,0
1155
+ 0.35984796,0.64015204,0
1156
+ 0.03255315,0.96744686,0
1157
+ 0.00635857,0.99364144,0
1158
+ 0.003481283,0.99651873,0
1159
+ 0.004029874,0.99597013,0
1160
+ 0.96998805,0.030011952,1
1161
+ 0.9995035,0.0004965067,1
1162
+ 0.97326535,0.02673465,1
1163
+ 0.15379971,0.8462003,0
1164
+ 0.9875378,0.012462199,1
1165
+ 0.9947543,0.0052456856,1
1166
+ 0.9972589,0.0027410984,1
1167
+ 0.0022406196,0.9977594,0
1168
+ 0.05233742,0.9476626,0
1169
+ 0.9996507,0.00034928322,1
1170
+ 0.08184431,0.91815567,0
1171
+ 0.9850974,0.014902592,1
1172
+ 0.00154207,0.9984579,0
1173
+ 0.068061516,0.93193847,0
1174
+ 0.9939302,0.0060697794,1
1175
+ 0.99886215,0.0011378527,1
1176
+ 0.0015113561,0.99848866,0
1177
+ 0.0152161475,0.9847838,0
1178
+ 0.020305803,0.9796942,0
1179
+ 0.05149378,0.94850624,0
1180
+ 0.0011854175,0.9988146,0
1181
+ 0.026376074,0.97362393,0
1182
+ 0.99967,0.0003299713,1
1183
+ 0.005790658,0.99420935,0
1184
+ 0.018367648,0.98163235,0
1185
+ 0.0019875073,0.9980125,0
1186
+ 0.9150737,0.08492631,1
1187
+ 0.98478544,0.015214562,1
1188
+ 0.009416436,0.99058354,0
1189
+ 0.55437297,0.44562703,1
1190
+ 0.99855846,0.0014415383,1
1191
+ 0.997712,0.002287984,1
1192
+ 0.05325386,0.9467461,0
1193
+ 0.9996731,0.00032687187,1
1194
+ 0.99603313,0.003966868,1
1195
+ 0.94937605,0.050623953,1
1196
+ 0.040828884,0.9591711,0
1197
+ 0.026851915,0.9731481,0
1198
+ 0.0020019389,0.99799806,0
1199
+ 0.9987338,0.0012661815,1
1200
+ 0.999694,0.00030601025,1
1201
+ 0.9951792,0.0048208237,1
1202
+ 0.010280439,0.98971957,0
1203
+ 0.0072139497,0.99278605,0
1204
+ 0.20315804,0.796842,0
1205
+ 0.012223116,0.9877769,0
1206
+ 0.0014825064,0.9985175,0
1207
+ 0.99701,0.0029900074,1
1208
+ 0.007782429,0.9922176,0
1209
+ 0.00553273,0.99446726,0
1210
+ 0.001234608,0.9987654,0
1211
+ 0.0035403005,0.9964597,0
1212
+ 0.62350154,0.37649846,1
1213
+ 0.030935526,0.9690645,0
1214
+ 0.9997024,0.000297606,1
1215
+ 0.034490183,0.96550983,0
1216
+ 0.014377186,0.9856228,0
1217
+ 0.004641575,0.9953584,0
1218
+ 0.9197556,0.08024442,1
1219
+ 0.0038577665,0.9961422,0
1220
+ 0.9644881,0.03551191,1
1221
+ 0.09537731,0.9046227,0
1222
+ 0.0055731297,0.99442685,0
1223
+ 0.9913105,0.008689523,1
1224
+ 0.99970573,0.00029426813,1
1225
+ 0.9980634,0.0019366145,1
1226
+ 0.9650151,0.034984887,1
1227
+ 0.99434644,0.00565356,1
1228
+ 0.99967,0.0003299713,1
1229
+ 0.24676669,0.7532333,0
1230
+ 0.016807081,0.9831929,0
1231
+ 0.77035934,0.22964066,1
1232
+ 0.9429843,0.057015717,1
1233
+ 0.9996346,0.00036537647,1
1234
+ 0.35616896,0.643831,0
1235
+ 0.72348094,0.27651906,1
1236
+ 0.99919504,0.0008049607,1
1237
+ 0.0030775182,0.9969225,0
1238
+ 0.9955479,0.0044521093,1
1239
+ 0.99656147,0.0034385324,1
1240
+ 0.1263428,0.8736572,0
1241
+ 0.9167096,0.0832904,1
1242
+ 0.62057126,0.37942874,1
1243
+ 0.9827916,0.017208397,1
1244
+ 0.98991287,0.010087132,1
1245
+ 0.58482105,0.41517895,1
1246
+ 0.9847498,0.015250206,1
1247
+ 0.017456146,0.9825438,0
1248
+ 0.0033670268,0.996633,0
1249
+ 0.067455925,0.93254405,0
1250
+ 0.008801719,0.9911983,0
1251
+ 0.99711263,0.0028873682,1
1252
+ 0.97653425,0.023465753,1
1253
+ 0.0015753305,0.99842465,0
1254
+ 0.025315812,0.9746842,0
1255
+ 0.0048881443,0.9951119,0
1256
+ 0.98227274,0.017727256,1
1257
+ 0.75232244,0.24767756,1
1258
+ 0.015338197,0.9846618,0
1259
+ 0.028242337,0.97175765,0
1260
+ 0.0029860225,0.997014,0
1261
+ 0.0092257215,0.9907743,0
1262
+ 0.9925874,0.0074126124,1
1263
+ 0.3053507,0.69464934,0
1264
+ 0.20891643,0.7910836,0
1265
+ 0.99899155,0.001008451,1
1266
+ 0.9890218,0.010978222,1
1267
+ 0.0030890193,0.996911,0
1268
+ 0.0016219382,0.99837804,0
1269
+ 0.0039517684,0.9960482,0
1270
+ 0.9979395,0.002060473,1
1271
+ 0.9927933,0.0072066784,1
1272
+ 0.9993703,0.0006297231,1
1273
+ 0.6998841,0.30011588,1
1274
+ 0.99954766,0.00045233965,1
1275
+ 0.02779311,0.9722069,0
1276
+ 0.9968592,0.0031408072,1
1277
+ 0.014288131,0.9857119,0
1278
+ 0.38433754,0.61566246,0
1279
+ 0.22327325,0.7767267,0
1280
+ 0.012611731,0.98738825,0
1281
+ 0.9849435,0.015056491,1
1282
+ 0.0270361,0.9729639,0
1283
+ 0.0015607317,0.99843925,0
1284
+ 0.9633292,0.036670804,1
1285
+ 0.9657903,0.03420973,1
1286
+ 0.97965574,0.020344257,1
1287
+ 0.9995334,0.00046658516,1
1288
+ 0.99930227,0.000697732,1
1289
+ 0.09106755,0.90893245,0
1290
+ 0.09101162,0.90898836,0
1291
+ 0.13524468,0.86475533,0
1292
+ 0.0018709146,0.99812907,0
1293
+ 0.06420994,0.93579006,0
1294
+ 0.036279976,0.96372,0
1295
+ 0.014073258,0.98592675,0
1296
+ 0.011641149,0.98835886,0
1297
+ 0.840176,0.15982401,1
1298
+ 0.0045021693,0.9954978,0
1299
+ 0.99861026,0.0013897419,1
1300
+ 0.99680364,0.0031963587,1
1301
+ 0.12989672,0.8701033,0
1302
+ 0.9993044,0.0006955862,1
1303
+ 0.9167421,0.08325791,1
1304
+ 0.973736,0.026264012,1
1305
+ 0.013045602,0.9869544,0
1306
+ 0.08042194,0.9195781,0
1307
+ 0.020277733,0.97972226,0
1308
+ 0.0010888084,0.9989112,0
1309
+ 0.8114757,0.1885243,1
1310
+ 0.010996237,0.9890038,0
1311
+ 0.9502845,0.04971552,1
1312
+ 0.0030244759,0.99697554,0
1313
+ 0.004883582,0.9951164,0
1314
+ 0.9399636,0.06003642,1
1315
+ 0.049094427,0.95090556,0
1316
+ 0.99973804,0.0002619624,1
1317
+ 0.17771359,0.8222864,0
1318
+ 0.9997304,0.0002695918,1
1319
+ 0.9995974,0.00040262938,1
1320
+ 0.9165622,0.0834378,1
1321
+ 0.002704514,0.9972955,0
1322
+ 0.9976847,0.0023152828,1
1323
+ 0.0016788145,0.9983212,0
1324
+ 0.007415337,0.99258465,0
1325
+ 0.9994849,0.00051510334,1
1326
+ 0.4993563,0.50064373,0
1327
+ 0.0014385482,0.99856144,0
1328
+ 0.02351278,0.9764872,0
1329
+ 0.02326621,0.9767338,0
1330
+ 0.001454128,0.9985459,0
1331
+ 0.0024773262,0.99752265,0
1332
+ 0.83914065,0.16085935,1
1333
+ 0.0010989618,0.998901,0
1334
+ 0.9997029,0.00029712915,1
1335
+ 0.99854565,0.0014543533,1
1336
+ 0.46985435,0.53014565,0
1337
+ 0.99826944,0.0017305613,1
1338
+ 0.0039111977,0.9960888,0
1339
+ 0.9976343,0.0023657084,1
1340
+ 0.0017176558,0.9982824,0
1341
+ 0.0032231521,0.9967768,0
1342
+ 0.99176836,0.00823164,1
1343
+ 0.006824911,0.9931751,0
1344
+ 0.9995277,0.0004723072,1
1345
+ 0.9885992,0.011400819,1
1346
+ 0.9994593,0.00054067373,1
1347
+ 0.007492461,0.9925075,0
1348
+ 0.972298,0.027701974,1
1349
+ 0.99797565,0.0020243526,1
1350
+ 0.013883206,0.98611677,0
1351
+ 0.9854586,0.014541388,1
1352
+ 0.9982987,0.0017012954,1
1353
+ 0.7993407,0.20065928,1
1354
+ 0.0015109148,0.9984891,0
1355
+ 0.99794275,0.0020572543,1
1356
+ 0.009570254,0.99042976,0
1357
+ 0.0059960196,0.99400395,0
1358
+ 0.60245603,0.39754397,1
1359
+ 0.010218779,0.9897812,0
1360
+ 0.9018308,0.09816921,1
1361
+ 0.0032540965,0.9967459,0
1362
+ 0.84531486,0.15468514,1
1363
+ 0.9756452,0.024354815,1
1364
+ 0.1849733,0.8150267,0
1365
+ 0.99217165,0.007828355,1
1366
+ 0.99935395,0.00064605474,1
1367
+ 0.99876773,0.0012322664,1
1368
+ 0.9995166,0.00048339367,1
1369
+ 0.9997111,0.0002889037,1
1370
+ 0.9994054,0.00059461594,1
1371
+ 0.99611485,0.00388515,1
1372
+ 0.17900935,0.8209907,0
1373
+ 0.009933155,0.9900668,0
1374
+ 0.0038156267,0.99618435,0
1375
+ 0.9990615,0.00093847513,1
1376
+ 0.99520385,0.0047961473,1
1377
+ 0.029874226,0.9701258,0
1378
+ 0.9967937,0.0032063127,1
1379
+ 0.09933858,0.9006614,0
1380
+ 0.9987204,0.0012795925,1
1381
+ 0.015697857,0.98430216,0
1382
+ 0.9925701,0.007429898,1
1383
+ 0.9867278,0.013272226,1
1384
+ 0.99914455,0.00085544586,1
1385
+ 0.9836601,0.016339898,1
1386
+ 0.525327,0.47467297,1
1387
+ 0.020378929,0.97962105,0
1388
+ 0.0018324937,0.9981675,0
1389
+ 0.9495852,0.0504148,1
1390
+ 0.0032422137,0.9967578,0
1391
+ 0.96246886,0.037531137,1
1392
+ 0.99614453,0.0038554668,1
1393
+ 0.95421183,0.04578817,1
1394
+ 0.0055039967,0.994496,0
1395
+ 0.99832076,0.0016792417,1
1396
+ 0.998494,0.001505971,1
1397
+ 0.0012942175,0.9987058,0
1398
+ 0.055142563,0.9448574,0
1399
+ 0.9987268,0.0012732148,1
1400
+ 0.9970571,0.0029429197,1
1401
+ 0.029225158,0.9707748,0
1402
+ 0.99958724,0.00041276217,1
1403
+ 0.002650222,0.9973498,0
1404
+ 0.0015009107,0.9984991,0
1405
+ 0.04394095,0.95605904,0
1406
+ 0.99958867,0.00041133165,1
1407
+ 0.0717451,0.9282549,0
1408
+ 0.9989544,0.0010455847,1
1409
+ 0.99959534,0.00040465593,1
1410
+ 0.0040666834,0.9959333,0
1411
+ 0.9996476,0.00035238266,1
1412
+ 0.010230654,0.98976934,0
1413
+ 0.9995239,0.0004761219,1
1414
+ 0.032932594,0.9670674,0
1415
+ 0.85309184,0.14690816,1
1416
+ 0.08747701,0.912523,0
1417
+ 0.99963045,0.0003695488,1
1418
+ 0.0010741318,0.99892586,0
1419
+ 0.001443551,0.99855644,0
1420
+ 0.0059006773,0.9940993,0
1421
+ 0.9996792,0.0003207922,1
1422
+ 0.9995215,0.0004785061,1
1423
+ 0.9834373,0.0165627,1
1424
+ 0.0048408913,0.9951591,0
1425
+ 0.0090420395,0.990958,0
1426
+ 0.71002907,0.28997093,1
1427
+ 0.5222266,0.47777343,1
1428
+ 0.008282867,0.99171716,0
1429
+ 0.99939525,0.0006047487,1
1430
+ 0.9953845,0.0046154857,1
1431
+ 0.0041763578,0.9958236,0
1432
+ 0.003937003,0.996063,0
1433
+ 0.99652535,0.0034746528,1
1434
+ 0.072026715,0.9279733,0
1435
+ 0.0035754272,0.99642456,0
1436
+ 0.0657536,0.9342464,0
1437
+ 0.99300295,0.006997049,1
1438
+ 0.9987446,0.001255393,1
1439
+ 0.0032521,0.9967479,0
1440
+ 0.80868036,0.19131964,1
1441
+ 0.99907184,0.0009281635,1
1442
+ 0.9980843,0.0019156933,1
1443
+ 0.9994578,0.00054222345,1
1444
+ 0.042431526,0.95756847,0
1445
+ 0.99652016,0.0034798384,1
1446
+ 0.8464605,0.15353948,1
1447
+ 0.8961511,0.103848875,1
1448
+ 0.9885268,0.011473179,1
1449
+ 0.09863896,0.90136105,0
1450
+ 0.9994524,0.0005475879,1
1451
+ 0.0011883671,0.99881166,0
1452
+ 0.6643362,0.3356638,1
1453
+ 0.016839577,0.98316044,0
1454
+ 0.04169707,0.9583029,0
1455
+ 0.9979527,0.0020473003,1
1456
+ 0.9956642,0.0043358207,1
1457
+ 0.0039458955,0.9960541,0
1458
+ 0.99917513,0.0008248687,1
1459
+ 0.9983329,0.0016670823,1
1460
+ 0.6699569,0.33004308,1
1461
+ 0.0052819303,0.9947181,0
1462
+ 0.9983935,0.001606524,1
1463
+ 0.98981583,0.010184169,1
1464
+ 0.002239228,0.9977608,0
1465
+ 0.018177524,0.9818225,0
1466
+ 0.99946135,0.0005386472,1
1467
+ 0.0022319676,0.99776804,0
1468
+ 0.1395876,0.8604124,0
1469
+ 0.51797867,0.48202133,1
1470
+ 0.0019589327,0.9980411,0
1471
+ 0.9995278,0.000472188,1
1472
+ 0.0046190796,0.99538094,0
1473
+ 0.99906355,0.0009364486,1
1474
+ 0.0018072262,0.9981928,0
1475
+ 0.007264418,0.99273556,0
1476
+ 0.0017746218,0.9982254,0
1477
+ 0.9996475,0.00035250187,1
1478
+ 0.007589062,0.99241096,0
1479
+ 0.99969506,0.00030493736,1
1480
+ 0.87792414,0.122075856,1
1481
+ 0.01996821,0.9800318,0
1482
+ 0.005560132,0.99443984,0
1483
+ 0.62146825,0.37853175,1
1484
+ 0.9995036,0.0004963875,1
1485
+ 0.99965847,0.00034153461,1
1486
+ 0.052455466,0.9475445,0
1487
+ 0.41687372,0.5831263,0
1488
+ 0.01030318,0.9896968,0
1489
+ 0.99860233,0.0013976693,1
1490
+ 0.015531475,0.9844685,0
1491
+ 0.95792025,0.042079747,1
1492
+ 0.9996045,0.00039547682,1
1493
+ 0.0050975713,0.99490243,0
1494
+ 0.9876594,0.012340605,1
1495
+ 0.0022248216,0.9977752,0
1496
+ 0.023918904,0.9760811,0
1497
+ 0.929903,0.07009703,1
1498
+ 0.0822437,0.9177563,0
1499
+ 0.99656504,0.003434956,1
1500
+ 0.99951196,0.00048804283,1
1501
+ 0.9994816,0.0005183816,1
1502
+ 0.002942923,0.9970571,0
1503
+ 0.02278239,0.9772176,0
1504
+ 0.9923834,0.0076165795,1
1505
+ 0.9954041,0.0045958757,1
1506
+ 0.0061417734,0.9938582,0
1507
+ 0.0018719889,0.998128,0
1508
+ 0.002736234,0.9972638,0
1509
+ 0.0031740104,0.996826,0
1510
+ 0.99933296,0.0006670356,1
1511
+ 0.99942505,0.0005749464,1
1512
+ 0.995561,0.0044389963,1
1513
+ 0.0019285189,0.9980715,0
1514
+ 0.031854857,0.96814513,0
1515
+ 0.9208369,0.079163074,1
1516
+ 0.9994849,0.00051510334,1
1517
+ 0.0015442551,0.99845576,0
1518
+ 0.9991047,0.00089532137,1
1519
+ 0.9807288,0.019271195,1
1520
+ 0.0017318215,0.9982682,0
1521
+ 0.99789953,0.0021004677,1
1522
+ 0.011053641,0.9889464,0
1523
+ 0.99964404,0.00035595894,1
1524
+ 0.007632611,0.9923674,0
1525
+ 0.005098137,0.99490184,0
1526
+ 0.99944407,0.0005559325,1
1527
+ 0.98394185,0.016058147,1
1528
+ 0.0074339127,0.9925661,0
1529
+ 0.08361898,0.916381,0
1530
+ 0.0012433121,0.9987567,0
1531
+ 0.9892075,0.010792494,1
1532
+ 0.0017719731,0.998228,0
1533
+ 0.96539545,0.03460455,1
1534
+ 0.9986331,0.0013669133,1
1535
+ 0.06734009,0.9326599,0
1536
+ 0.99941456,0.0005854368,1
1537
+ 0.07179671,0.9282033,0
1538
+ 0.99960357,0.0003964305,1
1539
+ 0.98503786,0.014962137,1
1540
+ 0.96524197,0.03475803,1
1541
+ 0.99878675,0.0012132525,1
1542
+ 0.0008635663,0.99913645,0
1543
+ 0.8957919,0.10420811,1
1544
+ 0.8171658,0.18283421,1
1545
+ 0.004388231,0.9956118,0
1546
+ 0.008928414,0.9910716,0
1547
+ 0.0058229016,0.9941771,0
1548
+ 0.9507413,0.04925871,1
1549
+ 0.0069530113,0.993047,0
1550
+ 0.0029252893,0.9970747,0
1551
+ 0.004337367,0.9956626,0
1552
+ 0.0089890305,0.99101096,0
1553
+ 0.0039769495,0.99602306,0
1554
+ 0.99966586,0.00033414364,1
1555
+ 0.98868215,0.011317849,1
1556
+ 0.99932003,0.0006799698,1
1557
+ 0.0014281215,0.9985719,0
1558
+ 0.028855536,0.97114444,0
1559
+ 0.17490831,0.8250917,0
1560
+ 0.004751372,0.9952486,0
1561
+ 0.32029593,0.67970407,0
1562
+ 0.0018236204,0.9981764,0
1563
+ 0.0049955347,0.9950045,0
1564
+ 0.9959706,0.004029393,1
1565
+ 0.9963278,0.0036721826,1
1566
+ 0.0053753415,0.9946247,0
1567
+ 0.9993887,0.00061130524,1
1568
+ 0.0029191829,0.9970808,0
1569
+ 0.9729604,0.027039587,1
1570
+ 0.7769615,0.2230385,1
1571
+ 0.9948954,0.0051046014,1
1572
+ 0.0026113605,0.99738866,0
1573
+ 0.9987748,0.0012251735,1
1574
+ 0.999584,0.00041598082,1
1575
+ 0.99943227,0.00056773424,1
1576
+ 0.9831041,0.01689589,1
1577
+ 0.52868277,0.47131723,1
1578
+ 0.99933213,0.00066787004,1
1579
+ 0.4778809,0.5221191,0
1580
+ 0.011334694,0.9886653,0
1581
+ 0.99900657,0.0009934306,1
1582
+ 0.99918324,0.00081676245,1
1583
+ 0.9955811,0.0044189095,1
1584
+ 0.07140516,0.9285948,0
1585
+ 0.9994165,0.0005835295,1
1586
+ 0.9974892,0.002510786,1
1587
+ 0.012244845,0.9877552,0
1588
+ 0.9803711,0.019628882,1
1589
+ 0.99974686,0.00025314093,1
1590
+ 0.0046537737,0.99534625,0
1591
+ 0.0021557608,0.9978442,0
1592
+ 0.006846445,0.9931536,0
1593
+ 0.03608174,0.96391827,0
1594
+ 0.9776883,0.022311687,1
1595
+ 0.99922633,0.0007736683,1
1596
+ 0.99889034,0.0011096597,1
1597
+ 0.99892765,0.0010723472,1
1598
+ 0.9826744,0.01732558,1
1599
+ 0.99718624,0.0028137565,1
1600
+ 0.93252295,0.06747705,1
1601
+ 0.0010369178,0.99896306,0
1602
+ 0.11282801,0.887172,0
1603
+ 0.003802646,0.99619734,0
1604
+ 0.99968135,0.00031864643,1
1605
+ 0.052472122,0.9475279,0
1606
+ 0.0025673856,0.9974326,0
1607
+ 0.94831115,0.05168885,1
1608
+ 0.9973341,0.0026658773,1
1609
+ 0.0038341202,0.9961659,0
1610
+ 0.99929905,0.0007009506,1
1611
+ 0.20453553,0.79546446,0
1612
+ 0.002398736,0.9976013,0
1613
+ 0.99872345,0.0012765527,1
1614
+ 0.01726367,0.98273635,0
1615
+ 0.9816835,0.018316507,1
1616
+ 0.9939201,0.006079912,1
1617
+ 0.0011833311,0.99881667,0
1618
+ 0.10481991,0.8951801,0
1619
+ 0.96249074,0.037509263,1
1620
+ 0.004439258,0.99556077,0
1621
+ 0.030734256,0.96926576,0
1622
+ 0.40253726,0.5974628,0
1623
+ 0.9996387,0.00036132336,1
1624
+ 0.0014498043,0.9985502,0
1625
+ 0.9995264,0.0004736185,1
1626
+ 0.103664376,0.8963356,0
1627
+ 0.0023229967,0.997677,0
1628
+ 0.006421333,0.9935787,0
1629
+ 0.37353483,0.6264652,0
1630
+ 0.50394565,0.49605435,1
1631
+ 0.0013117989,0.9986882,0
1632
+ 0.9381904,0.0618096,1
1633
+ 0.9693514,0.03064859,1
1634
+ 0.020989085,0.97901094,0
1635
+ 0.9995921,0.00040787458,1
1636
+ 0.99963605,0.00036394596,1
1637
+ 0.009297834,0.99070215,0
1638
+ 0.99960905,0.00039094687,1
1639
+ 0.99955124,0.00044876337,1
1640
+ 0.99945873,0.0005412698,1
1641
+ 0.61848813,0.38151187,1
1642
+ 0.017595239,0.98240477,0
1643
+ 0.009341048,0.99065894,0
1644
+ 0.015007501,0.9849925,0
1645
+ 0.9754591,0.024540901,1
1646
+ 0.08949951,0.91050047,0
1647
+ 0.0043370333,0.995663,0
1648
+ 0.01012327,0.98987675,0
1649
+ 0.0075733266,0.9924267,0
1650
+ 0.012568837,0.98743117,0
1651
+ 0.99525094,0.0047490597,1
1652
+ 0.9757243,0.02427572,1
1653
+ 0.0026445866,0.9973554,0
1654
+ 0.009916109,0.9900839,0
1655
+ 0.002435114,0.9975649,0
1656
+ 0.010098687,0.9899013,0
1657
+ 0.808107,0.19189298,1
1658
+ 0.9980204,0.0019795895,1
1659
+ 0.03267146,0.96732855,0
1660
+ 0.0010410819,0.99895895,0
1661
+ 0.0016349988,0.998365,0
1662
+ 0.99909115,0.0009088516,1
1663
+ 0.937187,0.062812984,1
1664
+ 0.013449775,0.9865502,0
1665
+ 0.99940383,0.00059616566,1
1666
+ 0.062426973,0.937573,0
1667
+ 0.99939644,0.00060355663,1
1668
+ 0.9978956,0.0021044016,1
1669
+ 0.003047505,0.9969525,0
1670
+ 0.99212193,0.007878065,1
1671
+ 0.0013971839,0.9986028,0
1672
+ 0.007666092,0.9923339,0
1673
+ 0.002598066,0.99740195,0
1674
+ 0.12155999,0.87844,0
1675
+ 0.99642074,0.003579259,1
1676
+ 0.99969435,0.00030565262,1
1677
+ 0.001120927,0.9988791,0
1678
+ 0.00305398,0.99694604,0
1679
+ 0.99831665,0.0016833544,1
1680
+ 0.99961925,0.00038075447,1
1681
+ 0.08072966,0.91927034,0
1682
+ 0.99743855,0.00256145,1
1683
+ 0.9852321,0.014767885,1
1684
+ 0.08390233,0.91609764,0
1685
+ 0.0032026707,0.9967973,0
1686
+ 0.9849311,0.015068889,1
1687
+ 0.98837703,0.011622965,1
1688
+ 0.08748023,0.91251975,0
1689
+ 0.7383503,0.26164973,1
1690
+ 0.99709857,0.002901435,1
1691
+ 0.044292193,0.9557078,0
1692
+ 0.9498848,0.050115228,1
1693
+ 0.0021460515,0.99785393,0
1694
+ 0.0011546947,0.9988453,0
1695
+ 0.004270598,0.9957294,0
1696
+ 0.677085,0.32291502,1
1697
+ 0.008531692,0.9914683,0
1698
+ 0.0070538986,0.9929461,0
1699
+ 0.012215663,0.9877843,0
1700
+ 0.5241081,0.4758919,1
1701
+ 0.9736936,0.02630639,1
1702
+ 0.99968517,0.00031483173,1
1703
+ 0.0027774388,0.99722254,0
1704
+ 0.9997433,0.0002567172,1
1705
+ 0.016347442,0.98365253,0
1706
+ 0.99882275,0.0011772513,1
1707
+ 0.9983644,0.001635611,1
1708
+ 0.08831814,0.9116819,0
1709
+ 0.00734736,0.99265265,0
1710
+ 0.0031174822,0.9968825,0
1711
+ 0.9997229,0.000277102,1
1712
+ 0.018943774,0.9810562,0
1713
+ 0.67957735,0.32042265,1
1714
+ 0.9989209,0.0010790825,1
1715
+ 0.9996575,0.0003424883,1
1716
+ 0.028038539,0.97196144,0
1717
+ 0.99960655,0.00039345026,1
1718
+ 0.0025850143,0.997415,0
1719
+ 0.22348732,0.7765127,0
1720
+ 0.04243178,0.9575682,0
1721
+ 0.19639087,0.80360913,0
1722
+ 0.003479775,0.9965202,0
1723
+ 0.99964356,0.00035643578,1
1724
+ 0.049922813,0.9500772,0
1725
+ 0.017004436,0.98299557,0
1726
+ 0.7548002,0.2451998,1
1727
+ 0.0038676967,0.9961323,0
1728
+ 0.9990693,0.0009307265,1
1729
+ 0.0021761844,0.99782383,0
1730
+ 0.010882482,0.9891175,0
1731
+ 0.48742148,0.5125785,0
1732
+ 0.0044121235,0.9955879,0
1733
+ 0.33832738,0.6616726,0
1734
+ 0.011041878,0.9889581,0
1735
+ 0.0064772074,0.99352276,0
1736
+ 0.038636003,0.961364,0
1737
+ 0.13214126,0.86785877,0
1738
+ 0.006988656,0.99301136,0
1739
+ 0.99929476,0.00070524216,1
1740
+ 0.0059393826,0.99406064,0
1741
+ 0.92992014,0.07007986,1
1742
+ 0.8966881,0.1033119,1
1743
+ 0.0025808366,0.9974192,0
1744
+ 0.9727023,0.027297676,1
1745
+ 0.0070771486,0.99292284,0
1746
+ 0.00093023677,0.99906975,0
1747
+ 0.018261585,0.9817384,0
1748
+ 0.9997098,0.00029021502,1
1749
+ 0.0034556133,0.99654436,0
1750
+ 0.9995065,0.00049352646,1
1751
+ 0.002245517,0.99775445,0
1752
+ 0.030413054,0.96958697,0
1753
+ 0.9841485,0.015851498,1
1754
+ 0.9795884,0.02041161,1
1755
+ 0.20530094,0.7946991,0
1756
+ 0.0060509862,0.993949,0
1757
+ 0.01887886,0.9811211,0
1758
+ 0.97609997,0.023900032,1
1759
+ 0.99966943,0.00033056736,1
1760
+ 0.99840194,0.0015980601,1
1761
+ 0.0019324615,0.99806756,0
1762
+ 0.94006246,0.059937537,1
1763
+ 0.0051722433,0.99482775,0
1764
+ 0.9993222,0.000677824,1
1765
+ 0.0012218539,0.99877816,0
1766
+ 0.0009993113,0.99900067,0
1767
+ 0.9992186,0.0007814169,1
1768
+ 0.017290143,0.9827099,0
1769
+ 0.0034629924,0.996537,0
1770
+ 0.0047165914,0.9952834,0
1771
+ 0.012862803,0.9871372,0
1772
+ 0.0039547123,0.9960453,0
1773
+ 0.9990871,0.00091290474,1
1774
+ 0.99969196,0.0003080368,1
1775
+ 0.9996829,0.0003170967,1
1776
+ 0.99929476,0.00070524216,1
1777
+ 0.99896836,0.0010316372,1
1778
+ 0.007704763,0.99229527,0
1779
+ 0.99762017,0.0023798347,1
1780
+ 0.97065103,0.02934897,1
1781
+ 0.24630916,0.75369084,0
1782
+ 0.001178508,0.9988215,0
1783
+ 0.9995461,0.00045388937,1
1784
+ 0.47149187,0.5285081,0
1785
+ 0.99930656,0.00069344044,1
1786
+ 0.027528241,0.9724718,0
1787
+ 0.9996438,0.00035619736,1
1788
+ 0.074102916,0.92589706,0
1789
+ 0.0036210488,0.99637896,0
1790
+ 0.99295956,0.007040441,1
1791
+ 0.035725683,0.9642743,0
1792
+ 0.99973565,0.0002643466,1
1793
+ 0.013315974,0.986684,0
1794
+ 0.0014894401,0.99851054,0
1795
+ 0.9997009,0.0002990961,1
1796
+ 0.9994997,0.0005003214,1
1797
+ 0.9977241,0.0022758842,1
1798
+ 0.0020170046,0.997983,0
1799
+ 0.9995598,0.0004401803,1
1800
+ 0.9992637,0.0007362962,1
1801
+ 0.9997178,0.000282228,1
1802
+ 0.08650549,0.9134945,0
1803
+ 0.0054886,0.9945114,0
1804
+ 0.0010492286,0.9989508,0
1805
+ 0.9968765,0.0031235218,1
1806
+ 0.14038801,0.859612,0
1807
+ 0.9952773,0.0047227144,1
1808
+ 0.7962036,0.20379639,1
1809
+ 0.15651307,0.8434869,0
1810
+ 0.0012005005,0.9987995,0
1811
+ 0.024014043,0.97598594,0
1812
+ 0.0014820986,0.99851793,0
1813
+ 0.9997528,0.00024718046,1
1814
+ 0.76989216,0.23010784,1
1815
+ 0.0062649166,0.9937351,0
1816
+ 0.99131846,0.008681536,1
1817
+ 0.0052881422,0.9947119,0
1818
+ 0.022201896,0.9777981,0
1819
+ 0.0015704348,0.99842954,0
1820
+ 0.0031845067,0.9968155,0
1821
+ 0.008904114,0.9910959,0
1822
+ 0.001691829,0.9983082,0
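The per-epoch prediction files under res/output/ all share the same three-column layout, prob_1,prob_0,prediction, as given by the CSV header below. As a minimal sketch (assuming pandas and a local checkout of this folder; the path is taken from the file listing, everything else is illustrative), the rows can be loaded and the stored prediction column checked against a 0.5 threshold on prob_1, which is what it appears to encode:

import pandas as pd

# Path as it appears in this commit; adjust to your local checkout.
path = ("examples/AutoClsSST_SST-2/Transformer-Hybrid-Augmentation-Sentiment/"
        "res/output/test_prediction_epoch_3.csv")

preds = pd.read_csv(path)  # columns: prob_1, prob_0, prediction

# The stored label appears to be prob_1 thresholded at 0.5;
# count any rows where that assumption does not hold.
mismatches = (preds["prediction"] != (preds["prob_1"] > 0.5).astype(int)).sum()
print("rows:", len(preds), "threshold mismatches:", int(mismatches))

# Predicted class balance across the test set.
print(preds["prediction"].value_counts())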
examples/AutoClsSST_SST-2/Transformer-Hybrid-Augmentation-Sentiment/res/output/test_prediction_epoch_3.csv ADDED
@@ -0,0 +1,1822 @@
1
+ prob_1,prob_0,prediction
2
+ 0.005800618,0.9941994,0
3
+ 0.03543998,0.96456003,0
4
+ 0.0006062591,0.99939376,0
5
+ 0.0059438576,0.99405617,0
6
+ 0.9998548,0.00014519691,1
7
+ 0.9998592,0.00014078617,1
8
+ 0.34030315,0.6596968,0
9
+ 0.9995204,0.00047957897,1
10
+ 0.102881424,0.89711857,0
11
+ 0.002295297,0.9977047,0
12
+ 0.8776327,0.12236732,1
13
+ 0.0008060184,0.99919397,0
14
+ 0.9900046,0.009995401,1
15
+ 0.9934818,0.006518185,1
16
+ 0.0012471005,0.9987529,0
17
+ 0.99922395,0.0007760525,1
18
+ 0.90722793,0.09277207,1
19
+ 0.0014941585,0.99850583,0
20
+ 0.0038802626,0.99611974,0
21
+ 0.74026257,0.25973743,1
22
+ 0.9998652,0.0001348257,1
23
+ 0.0134572815,0.9865427,0
24
+ 0.9679611,0.032038927,1
25
+ 0.9998826,0.00011742115,1
26
+ 0.0008309179,0.9991691,0
27
+ 0.0020393361,0.9979607,0
28
+ 0.0038753832,0.9961246,0
29
+ 0.9997545,0.00024551153,1
30
+ 0.003696539,0.99630344,0
31
+ 0.9997174,0.00028258562,1
32
+ 0.99969256,0.00030744076,1
33
+ 0.00087147654,0.9991285,0
34
+ 0.9998785,0.000121474266,1
35
+ 0.9996207,0.00037932396,1
36
+ 0.9998957,0.00010430813,1
37
+ 0.995103,0.0048969984,1
38
+ 0.9988065,0.0011935234,1
39
+ 0.989737,0.010263026,1
40
+ 0.15397856,0.8460214,0
41
+ 0.0015394306,0.9984606,0
42
+ 0.9998381,0.00016188622,1
43
+ 0.9996854,0.00031459332,1
44
+ 0.0018210895,0.9981789,0
45
+ 0.037155125,0.96284485,0
46
+ 0.0005116888,0.9994883,0
47
+ 0.99989796,0.00010204315,1
48
+ 0.99951935,0.00048065186,1
49
+ 0.8770765,0.12292349,1
50
+ 0.00054980244,0.9994502,0
51
+ 0.6639618,0.33603817,1
52
+ 0.0008272558,0.99917275,0
53
+ 0.9998394,0.00016057491,1
54
+ 0.99937767,0.0006223321,1
55
+ 0.0005221375,0.99947786,0
56
+ 0.0013906898,0.9986093,0
57
+ 0.99985325,0.00014674664,1
58
+ 0.013882468,0.98611754,0
59
+ 0.90347254,0.09652746,1
60
+ 0.042404525,0.95759547,0
61
+ 0.019674951,0.98032504,0
62
+ 0.9998841,0.00011587143,1
63
+ 0.0059580575,0.9940419,0
64
+ 0.0020506168,0.99794936,0
65
+ 0.6146617,0.3853383,1
66
+ 0.99973196,0.0002680421,1
67
+ 0.99814713,0.00185287,1
68
+ 0.99986553,0.00013446808,1
69
+ 0.00046437062,0.9995356,0
70
+ 0.00107018,0.9989298,0
71
+ 0.88608235,0.11391765,1
72
+ 0.99977714,0.00022286177,1
73
+ 0.0067651807,0.9932348,0
74
+ 0.008446162,0.99155384,0
75
+ 0.9997074,0.0002925992,1
76
+ 0.99865365,0.0013463497,1
77
+ 0.98266715,0.017332852,1
78
+ 0.9997911,0.00020891428,1
79
+ 0.8690063,0.13099372,1
80
+ 0.60922366,0.39077634,1
81
+ 0.0011655022,0.9988345,0
82
+ 0.0024779744,0.997522,0
83
+ 0.0013894478,0.99861056,0
84
+ 0.0048725964,0.9951274,0
85
+ 0.0005463038,0.9994537,0
86
+ 0.8572365,0.1427635,1
87
+ 0.9905123,0.009487689,1
88
+ 0.37525678,0.6247432,0
89
+ 0.99640334,0.0035966635,1
90
+ 0.00060496735,0.999395,0
91
+ 0.0018311405,0.9981689,0
92
+ 0.9995297,0.00047028065,1
93
+ 0.99987197,0.00012803078,1
94
+ 0.9991824,0.0008175969,1
95
+ 0.22240312,0.7775969,0
96
+ 0.9924003,0.0075997114,1
97
+ 0.9996699,0.00033009052,1
98
+ 0.25822583,0.7417742,0
99
+ 0.0017091532,0.99829084,0
100
+ 0.000755797,0.9992442,0
101
+ 0.8783009,0.121699095,1
102
+ 0.9998684,0.00013160706,1
103
+ 0.9998807,0.0001193285,1
104
+ 0.006061212,0.9939388,0
105
+ 0.9843239,0.015676081,1
106
+ 0.023067366,0.97693264,0
107
+ 0.114602745,0.88539726,0
108
+ 0.9986351,0.0013648868,1
109
+ 0.999342,0.0006579757,1
110
+ 0.9998522,0.00014781952,1
111
+ 0.02997451,0.9700255,0
112
+ 0.0011424527,0.99885756,0
113
+ 0.0052665845,0.9947334,0
114
+ 0.23054704,0.7694529,0
115
+ 0.00902422,0.9909758,0
116
+ 0.9991375,0.0008624792,1
117
+ 0.06430091,0.9356991,0
118
+ 0.00051054766,0.9994894,0
119
+ 0.9423572,0.057642817,1
120
+ 0.13067152,0.8693285,0
121
+ 0.99984705,0.00015294552,1
122
+ 0.999526,0.00047397614,1
123
+ 0.85781115,0.14218885,1
124
+ 0.99936503,0.0006349683,1
125
+ 0.99986625,0.00013375282,1
126
+ 0.99482733,0.00517267,1
127
+ 0.47006813,0.5299319,0
128
+ 0.059286185,0.9407138,0
129
+ 0.2450508,0.7549492,0
130
+ 0.015855374,0.9841446,0
131
+ 0.99959594,0.0004040599,1
132
+ 0.0008268526,0.99917316,0
133
+ 0.7905066,0.2094934,1
134
+ 0.005435629,0.99456435,0
135
+ 0.0035750538,0.996425,0
136
+ 0.9087756,0.09122437,1
137
+ 0.0009636998,0.9990363,0
138
+ 0.98815084,0.011849165,1
139
+ 0.002286675,0.9977133,0
140
+ 0.9995116,0.00048840046,1
141
+ 0.8964959,0.10350412,1
142
+ 0.9998641,0.00013589859,1
143
+ 0.89944863,0.10055137,1
144
+ 0.33997828,0.6600217,0
145
+ 0.0025799852,0.99742,0
146
+ 0.9958467,0.0041533113,1
147
+ 0.002463492,0.9975365,0
148
+ 0.99781054,0.0021894574,1
149
+ 0.00069799693,0.999302,0
150
+ 0.9995981,0.00040191412,1
151
+ 0.017287826,0.98271215,0
152
+ 0.0015942485,0.99840575,0
153
+ 0.0009854941,0.9990145,0
154
+ 0.014701575,0.9852984,0
155
+ 0.9988728,0.0011271834,1
156
+ 0.000757144,0.99924284,0
157
+ 0.00101958,0.9989804,0
158
+ 0.9998012,0.00019878149,1
159
+ 0.99984074,0.00015926361,1
160
+ 0.02117177,0.97882825,0
161
+ 0.9998586,0.00014138222,1
162
+ 0.00063293654,0.99936706,0
163
+ 0.99044925,0.00955075,1
164
+ 0.99987113,0.00012886524,1
165
+ 0.9997563,0.00024372339,1
166
+ 0.9998385,0.00016152859,1
167
+ 0.99813616,0.0018638372,1
168
+ 0.92019886,0.07980114,1
169
+ 0.9901661,0.009833872,1
170
+ 0.9998547,0.00014531612,1
171
+ 0.001896634,0.9981034,0
172
+ 0.021639923,0.97836006,0
173
+ 0.9998671,0.00013291836,1
174
+ 0.0010364936,0.99896353,0
175
+ 0.0055420375,0.99445796,0
176
+ 0.9998437,0.00015628338,1
177
+ 0.9998388,0.00016117096,1
178
+ 0.9984315,0.0015684962,1
179
+ 0.99982244,0.00017756224,1
180
+ 0.9998055,0.00019448996,1
181
+ 0.002396081,0.9976039,0
182
+ 0.00079243834,0.99920756,0
183
+ 0.9993339,0.0006660819,1
184
+ 0.9998479,0.00015211105,1
185
+ 0.041841388,0.9581586,0
186
+ 0.9954254,0.004574597,1
187
+ 0.999846,0.0001540184,1
188
+ 0.000589527,0.99941045,0
189
+ 0.9983859,0.0016140938,1
190
+ 0.14234424,0.85765576,0
191
+ 0.9968184,0.0031815767,1
192
+ 0.0031516473,0.99684834,0
193
+ 0.45766348,0.5423365,0
194
+ 0.99905676,0.0009432435,1
195
+ 0.9997588,0.00024122,1
196
+ 0.0006570244,0.999343,0
197
+ 0.9996561,0.0003439188,1
198
+ 0.9998957,0.00010430813,1
199
+ 0.0007958502,0.99920416,0
200
+ 0.9998665,0.0001335144,1
201
+ 0.0015212462,0.9984788,0
202
+ 0.9999008,9.918213e-05,1
203
+ 0.0018878883,0.9981121,0
204
+ 0.00060529145,0.9993947,0
205
+ 0.0010872538,0.99891275,0
206
+ 0.9998851,0.000114917755,1
207
+ 0.0026411829,0.9973588,0
208
+ 0.24844041,0.7515596,0
209
+ 0.010122286,0.9898777,0
210
+ 0.99864894,0.0013510585,1
211
+ 0.9993337,0.0006663203,1
212
+ 0.9998344,0.0001655817,1
213
+ 0.9997683,0.00023168325,1
214
+ 0.002555696,0.99744433,0
215
+ 0.9983109,0.0016890764,1
216
+ 0.0009031658,0.9990968,0
217
+ 0.0019508306,0.99804914,0
218
+ 0.00095690455,0.9990431,0
219
+ 0.99985516,0.00014483929,1
220
+ 0.0024601198,0.9975399,0
221
+ 0.0183025,0.9816975,0
222
+ 0.0044627967,0.9955372,0
223
+ 0.984977,0.015022993,1
224
+ 0.012544495,0.9874555,0
225
+ 0.9420592,0.05794078,1
226
+ 0.9998876,0.00011241436,1
227
+ 0.9993587,0.0006412864,1
228
+ 0.99986184,0.00013816357,1
229
+ 0.9997408,0.0002592206,1
230
+ 0.8694936,0.1305064,1
231
+ 0.00054534886,0.9994547,0
232
+ 0.00071757793,0.9992824,0
233
+ 0.0005161785,0.9994838,0
234
+ 0.9998499,0.0001500845,1
235
+ 0.99865484,0.0013451576,1
236
+ 0.99984205,0.00015795231,1
237
+ 0.99986255,0.00013744831,1
238
+ 0.00042133505,0.99957865,0
239
+ 0.99988127,0.00011873245,1
240
+ 0.9947001,0.005299926,1
241
+ 0.99941015,0.00058984756,1
242
+ 0.99956363,0.0004363656,1
243
+ 0.014314164,0.9856858,0
244
+ 0.99977607,0.00022393465,1
245
+ 0.51993275,0.48006725,1
246
+ 0.99978787,0.00021213293,1
247
+ 0.72592735,0.27407265,1
248
+ 0.9997986,0.0002014041,1
249
+ 0.999587,0.00041300058,1
250
+ 0.0005878348,0.9994122,0
251
+ 0.99891615,0.0010838509,1
252
+ 0.99764353,0.0023564696,1
253
+ 0.97991246,0.02008754,1
254
+ 0.9998869,0.000113129616,1
255
+ 0.0027694337,0.9972306,0
256
+ 0.0034980772,0.9965019,0
257
+ 0.99984217,0.0001578331,1
258
+ 0.0005145817,0.99948543,0
259
+ 0.9998387,0.00016129017,1
260
+ 0.6415402,0.35845977,1
261
+ 0.99988055,0.00011944771,1
262
+ 0.0072037457,0.99279624,0
263
+ 0.9997634,0.00023657084,1
264
+ 0.0023045638,0.99769545,0
265
+ 0.0004702039,0.9995298,0
266
+ 0.99986136,0.0001386404,1
267
+ 0.9997776,0.00022238493,1
268
+ 0.00054918864,0.9994508,0
269
+ 0.9998548,0.00014519691,1
270
+ 0.999894,0.00010597706,1
271
+ 0.9985904,0.0014095902,1
272
+ 0.0057750004,0.994225,0
273
+ 0.0035004416,0.99649954,0
274
+ 0.0020544964,0.9979455,0
275
+ 0.9997913,0.00020867586,1
276
+ 0.9994485,0.0005515218,1
277
+ 0.13931644,0.86068356,0
278
+ 0.0029267678,0.99707323,0
279
+ 0.0011578845,0.9988421,0
280
+ 0.99984765,0.00015234947,1
281
+ 0.99877554,0.0012244582,1
282
+ 0.9996668,0.00033318996,1
283
+ 0.0018964029,0.9981036,0
284
+ 0.999853,0.00014698505,1
285
+ 0.0008680563,0.9991319,0
286
+ 0.7702868,0.2297132,1
287
+ 0.9984927,0.0015072823,1
288
+ 0.9995919,0.000408113,1
289
+ 0.9998388,0.00016117096,1
290
+ 0.9998023,0.0001977086,1
291
+ 0.0052349693,0.99476504,0
292
+ 0.0005658485,0.9994342,0
293
+ 0.9996965,0.00030350685,1
294
+ 0.0062834206,0.9937166,0
295
+ 0.001283825,0.9987162,0
296
+ 0.0010458067,0.9989542,0
297
+ 0.0016899407,0.9983101,0
298
+ 0.9999058,9.417534e-05,1
299
+ 0.9998895,0.00011050701,1
300
+ 0.99937695,0.00062304735,1
301
+ 0.0023701885,0.9976298,0
302
+ 0.99988675,0.000113248825,1
303
+ 0.9860839,0.013916075,1
304
+ 0.075747736,0.9242523,0
305
+ 0.999884,0.00011599064,1
306
+ 0.010250314,0.98974967,0
307
+ 0.0744432,0.9255568,0
308
+ 0.9997172,0.00028282404,1
309
+ 0.018830424,0.9811696,0
310
+ 0.97839797,0.021602035,1
311
+ 0.99976593,0.00023406744,1
312
+ 0.0005554082,0.9994446,0
313
+ 0.99984634,0.00015366077,1
314
+ 0.0016628837,0.9983371,0
315
+ 0.99981743,0.00018256903,1
316
+ 0.99914694,0.0008530617,1
317
+ 0.042176344,0.95782363,0
318
+ 0.988908,0.011092007,1
319
+ 0.9985807,0.0014193058,1
320
+ 0.9998498,0.0001502037,1
321
+ 0.99653155,0.003468454,1
322
+ 0.99952626,0.00047373772,1
323
+ 0.9997923,0.00020772219,1
324
+ 0.0018778285,0.99812216,0
325
+ 0.08521888,0.9147811,0
326
+ 0.0004155631,0.99958444,0
327
+ 0.0007519607,0.999248,0
328
+ 0.0007506708,0.99924934,0
329
+ 0.9923235,0.007676482,1
330
+ 0.008666018,0.99133396,0
331
+ 0.9998317,0.00016832352,1
332
+ 0.007810344,0.99218965,0
333
+ 0.9991714,0.0008286238,1
334
+ 0.010172078,0.98982793,0
335
+ 0.99985766,0.00014233589,1
336
+ 0.98437226,0.015627742,1
337
+ 0.9992987,0.00070130825,1
338
+ 0.0011159946,0.998884,0
339
+ 0.99990225,9.775162e-05,1
340
+ 0.118473694,0.8815263,0
341
+ 0.99987495,0.00012505054,1
342
+ 0.25792348,0.7420765,0
343
+ 0.9998925,0.00010752678,1
344
+ 0.06789507,0.93210495,0
345
+ 0.0004972471,0.9995028,0
346
+ 0.998321,0.0016790032,1
347
+ 0.0018729664,0.99812704,0
348
+ 0.9998883,0.000111699104,1
349
+ 0.03839427,0.9616057,0
350
+ 0.99986017,0.0001398325,1
351
+ 0.07505488,0.9249451,0
352
+ 0.9997371,0.0002629161,1
353
+ 0.9973911,0.0026088953,1
354
+ 0.0076537253,0.9923463,0
355
+ 0.001932088,0.9980679,0
356
+ 0.052779566,0.94722044,0
357
+ 0.004300658,0.99569935,0
358
+ 0.99988055,0.00011944771,1
359
+ 0.0034760495,0.996524,0
360
+ 0.0010645377,0.99893546,0
361
+ 0.9998442,0.00015580654,1
362
+ 0.9971699,0.0028300881,1
363
+ 0.5788319,0.4211681,1
364
+ 0.94375914,0.056240857,1
365
+ 0.99960464,0.0003953576,1
366
+ 0.022439985,0.97756004,0
367
+ 0.99970156,0.00029844046,1
368
+ 0.025717238,0.97428274,0
369
+ 0.9987423,0.0012577176,1
370
+ 0.019903792,0.9800962,0
371
+ 0.006889142,0.99311084,0
372
+ 0.16333824,0.83666176,0
373
+ 0.003388778,0.99661124,0
374
+ 0.99986506,0.00013494492,1
375
+ 0.9998627,0.0001373291,1
376
+ 0.48896998,0.51103,0
377
+ 0.9998472,0.00015282631,1
378
+ 0.014986517,0.9850135,0
379
+ 0.41831702,0.581683,0
380
+ 0.28469536,0.7153046,0
381
+ 0.2249478,0.7750522,0
382
+ 0.028216736,0.9717833,0
383
+ 0.9997185,0.00028151274,1
384
+ 0.0023198924,0.9976801,0
385
+ 0.11918487,0.88081515,0
386
+ 0.9418713,0.058128715,1
387
+ 0.99984264,0.00015735626,1
388
+ 0.0015010479,0.998499,0
389
+ 0.99984527,0.00015473366,1
390
+ 0.00052923,0.9994708,0
391
+ 0.9997465,0.00025349855,1
392
+ 0.004061304,0.9959387,0
393
+ 0.99979407,0.00020593405,1
394
+ 0.99854076,0.0014592409,1
395
+ 0.0029245939,0.9970754,0
396
+ 0.5928229,0.4071771,1
397
+ 0.002285224,0.99771476,0
398
+ 0.0040073725,0.9959926,0
399
+ 0.0009243019,0.9990757,0
400
+ 0.018714832,0.98128515,0
401
+ 0.015538934,0.98446107,0
402
+ 0.010657583,0.9893424,0
403
+ 0.9989318,0.0010681748,1
404
+ 0.00093897904,0.99906105,0
405
+ 0.99957234,0.00042766333,1
406
+ 0.016738689,0.9832613,0
407
+ 0.99973947,0.0002605319,1
408
+ 0.001109251,0.99889076,0
409
+ 0.00063022395,0.9993698,0
410
+ 0.99979705,0.00020295382,1
411
+ 0.9998709,0.00012910366,1
412
+ 0.0013820207,0.998618,0
413
+ 0.00082557806,0.9991744,0
414
+ 0.98632777,0.013672233,1
415
+ 0.997209,0.0027909875,1
416
+ 0.026450869,0.9735491,0
417
+ 0.03953617,0.9604638,0
418
+ 0.0039685112,0.99603146,0
419
+ 0.9997968,0.00020319223,1
420
+ 0.00048351713,0.9995165,0
421
+ 0.9998419,0.00015807152,1
422
+ 0.9994481,0.0005518794,1
423
+ 0.0007115701,0.99928844,0
424
+ 0.9998568,0.00014317036,1
425
+ 0.0008494439,0.9991506,0
426
+ 0.00082795916,0.99917203,0
427
+ 0.9912547,0.008745313,1
428
+ 0.0033020705,0.9966979,0
429
+ 0.0041158493,0.9958842,0
430
+ 0.99987984,0.000120162964,1
431
+ 0.8334709,0.16652912,1
432
+ 0.00092876574,0.99907124,0
433
+ 0.9997831,0.0002169013,1
434
+ 0.8697313,0.1302687,1
435
+ 0.9993548,0.0006452203,1
436
+ 0.9981652,0.0018348098,1
437
+ 0.9994387,0.00056129694,1
438
+ 0.0018370767,0.9981629,0
439
+ 0.0791304,0.9208696,0
440
+ 0.9996238,0.00037622452,1
441
+ 0.0065772003,0.9934228,0
442
+ 0.00079947506,0.9992005,0
443
+ 0.00074114243,0.9992589,0
444
+ 0.00070237624,0.9992976,0
445
+ 0.0027764747,0.9972235,0
446
+ 0.9998055,0.00019448996,1
447
+ 0.99983454,0.0001654625,1
448
+ 0.14362045,0.85637957,0
449
+ 0.9994529,0.00054711103,1
450
+ 0.9559455,0.04405451,1
451
+ 0.4089555,0.5910445,0
452
+ 0.0026831285,0.9973169,0
453
+ 0.001094279,0.9989057,0
454
+ 0.0008854403,0.9991146,0
455
+ 0.997773,0.0022270083,1
456
+ 0.99895513,0.0010448694,1
457
+ 0.9998795,0.00012052059,1
458
+ 0.0035480591,0.9964519,0
459
+ 0.999673,0.00032699108,1
460
+ 0.9997538,0.0002462268,1
461
+ 0.99921954,0.0007804632,1
462
+ 0.0011392849,0.9988607,0
463
+ 0.9997646,0.00023537874,1
464
+ 0.99782395,0.0021760464,1
465
+ 0.00044304106,0.99955696,0
466
+ 0.038192105,0.9618079,0
467
+ 0.019001365,0.98099864,0
468
+ 0.026953066,0.97304696,0
469
+ 0.9896236,0.010376394,1
470
+ 0.99989355,0.000106453896,1
471
+ 0.016878832,0.98312116,0
472
+ 0.012579949,0.98742,0
473
+ 0.9995414,0.00045859814,1
474
+ 0.9997923,0.00020772219,1
475
+ 0.99840826,0.001591742,1
476
+ 0.999889,0.00011098385,1
477
+ 0.02325056,0.9767494,0
478
+ 0.99986565,0.00013434887,1
479
+ 0.29294947,0.70705056,0
480
+ 0.99970347,0.0002965331,1
481
+ 0.99984527,0.00015473366,1
482
+ 0.9998621,0.00013792515,1
483
+ 0.99977463,0.00022536516,1
484
+ 0.4495322,0.5504678,0
485
+ 0.03357672,0.9664233,0
486
+ 0.0006354361,0.99936455,0
487
+ 0.99987876,0.00012123585,1
488
+ 0.9925897,0.007410288,1
489
+ 0.031892374,0.96810764,0
490
+ 0.98179215,0.018207848,1
491
+ 0.12399734,0.87600267,0
492
+ 0.99989486,0.00010514259,1
493
+ 0.9997458,0.0002542138,1
494
+ 0.0007519976,0.999248,0
495
+ 0.99989426,0.00010573864,1
496
+ 0.99957114,0.00042885542,1
497
+ 0.9998561,0.00014388561,1
498
+ 0.0043803067,0.9956197,0
499
+ 0.016936686,0.98306334,0
500
+ 0.06253627,0.93746376,0
501
+ 0.025673332,0.97432667,0
502
+ 0.95098543,0.04901457,1
503
+ 0.0031992656,0.9968007,0
504
+ 0.9998479,0.00015211105,1
505
+ 0.9983741,0.0016258955,1
506
+ 0.99987483,0.00012516975,1
507
+ 0.99581677,0.004183233,1
508
+ 0.9998939,0.00010609627,1
509
+ 0.00092442654,0.9990756,0
510
+ 0.98451066,0.01548934,1
511
+ 0.99983656,0.00016343594,1
512
+ 0.93411744,0.06588256,1
513
+ 0.0017105296,0.99828947,0
514
+ 0.9998442,0.00015580654,1
515
+ 0.003613748,0.99638623,0
516
+ 0.045177538,0.9548225,0
517
+ 0.0032809428,0.99671906,0
518
+ 0.36017603,0.639824,0
519
+ 0.9998741,0.00012588501,1
520
+ 0.00061966863,0.99938035,0
521
+ 0.00066845835,0.99933153,0
522
+ 0.002112442,0.99788755,0
523
+ 0.0005944924,0.9994055,0
524
+ 0.011979032,0.98802096,0
525
+ 0.0030433424,0.99695665,0
526
+ 0.94837475,0.05162525,1
527
+ 0.036320463,0.96367955,0
528
+ 0.9983854,0.0016145706,1
529
+ 0.11826001,0.88174,0
530
+ 0.016161468,0.98383856,0
531
+ 0.12837903,0.87162095,0
532
+ 0.0044554686,0.99554455,0
533
+ 0.99973756,0.00026243925,1
534
+ 0.99981195,0.00018805265,1
535
+ 0.99976593,0.00023406744,1
536
+ 0.99938273,0.0006172657,1
537
+ 0.001182455,0.99881756,0
538
+ 0.99986315,0.00013685226,1
539
+ 0.99885964,0.0011403561,1
540
+ 0.19853896,0.80146104,0
541
+ 0.99978346,0.00021654367,1
542
+ 0.0018394268,0.9981606,0
543
+ 0.99988556,0.00011444092,1
544
+ 0.065095514,0.93490446,0
545
+ 0.99875915,0.0012408495,1
546
+ 0.999585,0.00041502714,1
547
+ 0.0037699025,0.9962301,0
548
+ 0.3452647,0.6547353,0
549
+ 0.99779886,0.00220114,1
550
+ 0.9942942,0.005705774,1
551
+ 0.9998697,0.00013029575,1
552
+ 0.02072965,0.97927034,0
553
+ 0.0006015418,0.99939847,0
554
+ 0.0036333636,0.9963666,0
555
+ 0.99987376,0.00012624264,1
556
+ 0.99905616,0.00094383955,1
557
+ 0.397876,0.602124,0
558
+ 0.9997857,0.0002142787,1
559
+ 0.099703066,0.9002969,0
560
+ 0.0021345394,0.99786544,0
561
+ 0.68352956,0.31647044,1
562
+ 0.003207387,0.9967926,0
563
+ 0.9998776,0.00012242794,1
564
+ 0.9992874,0.0007125735,1
565
+ 0.99987423,0.0001257658,1
566
+ 0.016605282,0.98339474,0
567
+ 0.9998273,0.00017267466,1
568
+ 0.9824265,0.017573476,1
569
+ 0.008456284,0.9915437,0
570
+ 0.9995999,0.00040012598,1
571
+ 0.9994691,0.0005308986,1
572
+ 0.9998697,0.00013029575,1
573
+ 0.9997912,0.00020879507,1
574
+ 0.9987301,0.001269877,1
575
+ 0.027897669,0.97210234,0
576
+ 0.0003929757,0.999607,0
577
+ 0.28543198,0.714568,0
578
+ 0.0024395185,0.9975605,0
579
+ 0.99984205,0.00015795231,1
580
+ 0.9900621,0.009937882,1
581
+ 0.8968516,0.1031484,1
582
+ 0.9997316,0.00026839972,1
583
+ 0.9998839,0.00011610985,1
584
+ 0.99982363,0.00017637014,1
585
+ 0.9892163,0.010783672,1
586
+ 0.998728,0.0012720227,1
587
+ 0.9998375,0.00016248226,1
588
+ 0.0014193807,0.99858063,0
589
+ 0.0019878424,0.9980122,0
590
+ 0.0014880586,0.99851197,0
591
+ 0.99986076,0.00013923645,1
592
+ 0.0007508283,0.99924916,0
593
+ 0.04265648,0.9573435,0
594
+ 0.007234593,0.9927654,0
595
+ 0.99968743,0.00031256676,1
596
+ 0.9983088,0.0016912222,1
597
+ 0.00058504683,0.999415,0
598
+ 0.99975055,0.00024944544,1
599
+ 0.003092134,0.9969079,0
600
+ 0.00069175474,0.9993082,0
601
+ 0.019222543,0.98077744,0
602
+ 0.9994475,0.00055247545,1
603
+ 0.99928576,0.00071424246,1
604
+ 0.99457437,0.005425632,1
605
+ 0.07292954,0.92707044,0
606
+ 0.00051635865,0.99948364,0
607
+ 0.0014454618,0.9985545,0
608
+ 0.73851347,0.26148653,1
609
+ 0.99740344,0.0025965571,1
610
+ 0.0013606326,0.99863935,0
611
+ 0.5565983,0.4434017,1
612
+ 0.00081684045,0.9991832,0
613
+ 0.13269113,0.86730886,0
614
+ 0.9955844,0.0044155717,1
615
+ 0.0005698359,0.9994302,0
616
+ 0.9950264,0.0049735904,1
617
+ 0.0018526448,0.99814737,0
618
+ 0.9997874,0.00021260977,1
619
+ 0.35825998,0.64174,0
620
+ 0.9874091,0.012590885,1
621
+ 0.99974245,0.00025755167,1
622
+ 0.99955136,0.00044864416,1
623
+ 0.00065169414,0.9993483,0
624
+ 0.98095095,0.019049048,1
625
+ 0.6082616,0.3917384,1
626
+ 0.046237048,0.95376295,0
627
+ 0.0008011109,0.9991989,0
628
+ 0.99981874,0.00018125772,1
629
+ 0.99989915,0.00010085106,1
630
+ 0.948537,0.051463008,1
631
+ 0.9969693,0.0030307174,1
632
+ 0.9888526,0.01114738,1
633
+ 0.9998636,0.00013637543,1
634
+ 0.9998851,0.000114917755,1
635
+ 0.9544636,0.0455364,1
636
+ 0.9998555,0.00014448166,1
637
+ 0.003983615,0.9960164,0
638
+ 0.0013058977,0.9986941,0
639
+ 0.018018942,0.98198104,0
640
+ 0.9638857,0.036114275,1
641
+ 0.99957246,0.00042754412,1
642
+ 0.99979204,0.0002079606,1
643
+ 0.9998436,0.00015640259,1
644
+ 0.088740416,0.9112596,0
645
+ 0.0049414444,0.99505854,0
646
+ 0.8512725,0.14872748,1
647
+ 0.00055073027,0.99944925,0
648
+ 0.0015378923,0.9984621,0
649
+ 0.7797957,0.2202043,1
650
+ 0.9998816,0.000118374825,1
651
+ 0.51862866,0.48137134,1
652
+ 0.9998628,0.00013720989,1
653
+ 0.99807835,0.0019216537,1
654
+ 0.024881704,0.9751183,0
655
+ 0.99989295,0.00010704994,1
656
+ 0.99683446,0.003165543,1
657
+ 0.99824715,0.0017528534,1
658
+ 0.0007473141,0.9992527,0
659
+ 0.9970477,0.0029522777,1
660
+ 0.99974173,0.00025826693,1
661
+ 0.001984704,0.9980153,0
662
+ 0.00035851455,0.9996415,0
663
+ 0.99896264,0.0010373592,1
664
+ 0.0006995332,0.9993005,0
665
+ 0.9998821,0.00011789799,1
666
+ 0.9997887,0.00021129847,1
667
+ 0.99971503,0.0002849698,1
668
+ 0.9969049,0.0030950904,1
669
+ 0.99984837,0.00015163422,1
670
+ 0.0065129213,0.99348706,0
671
+ 0.0006309331,0.9993691,0
672
+ 0.8989326,0.101067424,1
673
+ 0.12730394,0.87269604,0
674
+ 0.9997764,0.00022357702,1
675
+ 0.0010476377,0.9989524,0
676
+ 0.0004905225,0.99950945,0
677
+ 0.011581958,0.98841804,0
678
+ 0.36620617,0.63379383,0
679
+ 0.34586284,0.65413713,0
680
+ 0.00036284697,0.9996371,0
681
+ 0.0014014964,0.9985985,0
682
+ 0.578242,0.421758,1
683
+ 0.023545286,0.97645473,0
684
+ 0.99918216,0.00081783533,1
685
+ 0.00038932858,0.99961066,0
686
+ 0.0016717727,0.9983282,0
687
+ 0.0009765718,0.99902344,0
688
+ 0.002707219,0.99729276,0
689
+ 0.00053377525,0.99946624,0
690
+ 0.99862623,0.0013737679,1
691
+ 0.001933626,0.99806637,0
692
+ 0.59228116,0.40771884,1
693
+ 0.0011632884,0.9988367,0
694
+ 0.0022466937,0.9977533,0
695
+ 0.9988181,0.0011819005,1
696
+ 0.9995732,0.00042682886,1
697
+ 0.99988115,0.00011885166,1
698
+ 0.0018504241,0.9981496,0
699
+ 0.99987054,0.00012946129,1
700
+ 0.9997807,0.00021928549,1
701
+ 0.99824166,0.001758337,1
702
+ 0.0116322255,0.9883678,0
703
+ 0.9996649,0.0003350973,1
704
+ 0.99982977,0.00017023087,1
705
+ 0.9996024,0.00039762259,1
706
+ 0.99984396,0.00015604496,1
707
+ 0.9998852,0.000114798546,1
708
+ 0.9996146,0.00038540363,1
709
+ 0.9996785,0.00032150745,1
710
+ 0.00065776,0.99934226,0
711
+ 0.00038170032,0.9996183,0
712
+ 0.9986632,0.001336813,1
713
+ 0.9833188,0.016681194,1
714
+ 0.98615533,0.013844669,1
715
+ 0.9996809,0.00031912327,1
716
+ 0.9941057,0.0058943033,1
717
+ 0.96495295,0.035047054,1
718
+ 0.99983835,0.0001616478,1
719
+ 0.051052198,0.9489478,0
720
+ 0.030856485,0.9691435,0
721
+ 0.0063465643,0.9936534,0
722
+ 0.025195805,0.9748042,0
723
+ 0.0021139686,0.997886,0
724
+ 0.9955635,0.004436493,1
725
+ 0.85092825,0.14907175,1
726
+ 0.87817454,0.12182546,1
727
+ 0.9998709,0.00012910366,1
728
+ 0.9974228,0.0025771856,1
729
+ 0.99568427,0.004315734,1
730
+ 0.009887373,0.9901126,0
731
+ 0.083263084,0.9167369,0
732
+ 0.0023533637,0.99764663,0
733
+ 0.0017193796,0.99828064,0
734
+ 0.0010816638,0.99891835,0
735
+ 0.99976856,0.00023144484,1
736
+ 0.11810675,0.8818933,0
737
+ 0.9998466,0.00015342236,1
738
+ 0.99954045,0.0004595518,1
739
+ 0.97049683,0.029503167,1
740
+ 0.9997904,0.00020962954,1
741
+ 0.9998847,0.00011527538,1
742
+ 0.62018067,0.37981933,1
743
+ 0.99982446,0.00017553568,1
744
+ 0.99985945,0.00014054775,1
745
+ 0.99528176,0.004718244,1
746
+ 0.7747988,0.22520119,1
747
+ 0.015135497,0.9848645,0
748
+ 0.99965537,0.00034463406,1
749
+ 0.999816,0.00018399954,1
750
+ 0.0031874748,0.9968125,0
751
+ 0.0032032933,0.9967967,0
752
+ 0.999882,0.0001180172,1
753
+ 0.9993967,0.0006033182,1
754
+ 0.6477392,0.35226083,1
755
+ 0.9958832,0.0041167736,1
756
+ 0.0013887084,0.9986113,0
757
+ 0.42373124,0.5762688,0
758
+ 0.9031008,0.09689921,1
759
+ 0.999739,0.00026100874,1
760
+ 0.91946846,0.08053154,1
761
+ 0.9998909,0.0001090765,1
762
+ 0.00837616,0.9916238,0
763
+ 0.005331507,0.9946685,0
764
+ 0.996067,0.0039330125,1
765
+ 0.99987185,0.00012814999,1
766
+ 0.6826431,0.31735688,1
767
+ 0.0006889698,0.99931103,0
768
+ 0.0019775406,0.99802244,0
769
+ 0.9987716,0.0012283921,1
770
+ 0.7863164,0.2136836,1
771
+ 0.99521494,0.004785061,1
772
+ 0.010195524,0.98980445,0
773
+ 0.9986986,0.0013014078,1
774
+ 0.9997811,0.00021892786,1
775
+ 0.9996517,0.00034832954,1
776
+ 0.9996195,0.00038051605,1
777
+ 0.99980015,0.00019985437,1
778
+ 0.04696931,0.9530307,0
779
+ 0.4626624,0.5373376,0
780
+ 0.051520154,0.94847983,0
781
+ 0.007973472,0.9920265,0
782
+ 0.03003946,0.9699606,0
783
+ 0.0060266717,0.9939733,0
784
+ 0.004246905,0.9957531,0
785
+ 0.050974093,0.9490259,0
786
+ 0.012137453,0.9878625,0
787
+ 0.99986756,0.00013244152,1
788
+ 0.9995401,0.00045990944,1
789
+ 0.0020989368,0.9979011,0
790
+ 0.99984026,0.00015974045,1
791
+ 0.84852463,0.15147537,1
792
+ 0.99969375,0.00030624866,1
793
+ 0.2308492,0.7691508,0
794
+ 0.9988944,0.0011056066,1
795
+ 0.0014477348,0.99855226,0
796
+ 0.0003655372,0.99963444,0
797
+ 0.4671276,0.53287244,0
798
+ 0.03742454,0.96257544,0
799
+ 0.99968326,0.00031673908,1
800
+ 0.00080849143,0.9991915,0
801
+ 0.0025127027,0.9974873,0
802
+ 0.0026244598,0.99737555,0
803
+ 0.99986506,0.00013494492,1
804
+ 0.9998522,0.00014781952,1
805
+ 0.0016745875,0.9983254,0
806
+ 0.97248614,0.027513862,1
807
+ 0.00091421464,0.9990858,0
808
+ 0.014230471,0.9857695,0
809
+ 0.99976045,0.00023955107,1
810
+ 0.0033379302,0.9966621,0
811
+ 0.993898,0.0061020255,1
812
+ 0.042577576,0.95742244,0
813
+ 0.70759535,0.29240465,1
814
+ 0.0061001866,0.9938998,0
815
+ 0.9998642,0.00013577938,1
816
+ 0.99986017,0.0001398325,1
817
+ 0.9997789,0.00022107363,1
818
+ 0.0017453748,0.9982546,0
819
+ 0.0022424776,0.9977575,0
820
+ 0.010837243,0.98916274,0
821
+ 0.9997925,0.00020748377,1
822
+ 0.0024992705,0.9975007,0
823
+ 0.0014197052,0.9985803,0
824
+ 0.00054235035,0.99945766,0
825
+ 0.9334023,0.0665977,1
826
+ 0.010303966,0.989696,0
827
+ 0.96604884,0.033951163,1
828
+ 0.0021053187,0.9978947,0
829
+ 0.0010464644,0.9989535,0
830
+ 0.97978485,0.020215154,1
831
+ 0.99856085,0.0014391541,1
832
+ 0.006126183,0.99387383,0
833
+ 0.0012954602,0.99870455,0
834
+ 0.0011313771,0.99886864,0
835
+ 0.00074777467,0.9992522,0
836
+ 0.03288351,0.9671165,0
837
+ 0.0021799018,0.9978201,0
838
+ 0.9997577,0.00024229288,1
839
+ 0.0013078868,0.9986921,0
840
+ 0.9985726,0.001427412,1
841
+ 0.0012448563,0.99875516,0
842
+ 0.99989533,0.000104665756,1
843
+ 0.27335644,0.72664356,0
844
+ 0.99926525,0.00073474646,1
845
+ 0.8573537,0.14264631,1
846
+ 0.0004410353,0.999559,0
847
+ 0.99903715,0.00096285343,1
848
+ 0.0090349205,0.99096507,0
849
+ 0.99941945,0.00058054924,1
850
+ 0.91562104,0.08437896,1
851
+ 0.12860882,0.8713912,0
852
+ 0.97572225,0.024277747,1
853
+ 0.13642058,0.8635794,0
854
+ 0.003712129,0.9962879,0
855
+ 0.94779,0.052209973,1
856
+ 0.0019567248,0.9980433,0
857
+ 0.9998429,0.00015711784,1
858
+ 0.83540064,0.16459936,1
859
+ 0.00044724531,0.9995527,0
860
+ 0.0022714045,0.9977286,0
861
+ 0.004430588,0.9955694,0
862
+ 0.99984646,0.00015354156,1
863
+ 0.99713624,0.0028637648,1
864
+ 0.006214071,0.9937859,0
865
+ 0.99939895,0.00060105324,1
866
+ 0.9994305,0.0005695224,1
867
+ 0.99983656,0.00016343594,1
868
+ 0.9982292,0.0017707944,1
869
+ 0.9969907,0.0030093193,1
870
+ 0.0009842049,0.9990158,0
871
+ 0.006238087,0.9937619,0
872
+ 0.36504304,0.63495696,0
873
+ 0.08662903,0.91337097,0
874
+ 0.99981827,0.00018173456,1
875
+ 0.99985147,0.00014853477,1
876
+ 0.9997904,0.00020962954,1
877
+ 0.9998722,0.00012779236,1
878
+ 0.999884,0.00011599064,1
879
+ 0.99979895,0.00020104647,1
880
+ 0.0017960909,0.99820393,0
881
+ 0.999907,9.2983246e-05,1
882
+ 0.0015376874,0.9984623,0
883
+ 0.046136733,0.95386326,0
884
+ 0.0034951433,0.99650484,0
885
+ 0.99964786,0.00035214424,1
886
+ 0.9988857,0.0011143088,1
887
+ 0.0060099154,0.99399006,0
888
+ 0.99925035,0.0007496476,1
889
+ 0.99981374,0.00018626451,1
890
+ 0.9998553,0.00014472008,1
891
+ 0.99965763,0.00034236908,1
892
+ 0.0010899563,0.99891007,0
893
+ 0.9185802,0.081419826,1
894
+ 0.00037244946,0.99962753,0
895
+ 0.99626833,0.003731668,1
896
+ 0.99987733,0.00012266636,1
897
+ 0.99989724,0.00010275841,1
898
+ 0.99887604,0.0011239648,1
899
+ 0.9997453,0.00025469065,1
900
+ 0.99990165,9.8347664e-05,1
901
+ 0.998181,0.0018190145,1
902
+ 0.00048398078,0.999516,0
903
+ 0.68607295,0.31392705,1
904
+ 0.99222094,0.007779062,1
905
+ 0.99927706,0.00072294474,1
906
+ 0.989486,0.010514021,1
907
+ 0.0057389196,0.9942611,0
908
+ 0.5470042,0.45299578,1
909
+ 0.08128349,0.9187165,0
910
+ 0.001237843,0.99876213,0
911
+ 0.0140639115,0.9859361,0
912
+ 0.046059057,0.9539409,0
913
+ 0.99987507,0.00012493134,1
914
+ 0.0008195735,0.99918044,0
915
+ 0.5977943,0.4022057,1
916
+ 0.8288801,0.17111993,1
917
+ 0.9964748,0.0035251975,1
918
+ 0.9990901,0.0009099245,1
919
+ 0.9998578,0.00014221668,1
920
+ 0.998711,0.00128901,1
921
+ 0.9996866,0.00031340122,1
922
+ 0.038840327,0.96115965,0
923
+ 0.0009697695,0.99903023,0
924
+ 0.9985177,0.0014823079,1
925
+ 0.033062626,0.96693736,0
926
+ 0.0006946225,0.99930537,0
927
+ 0.022865813,0.97713417,0
928
+ 0.029993463,0.9700065,0
929
+ 0.24968411,0.7503159,0
930
+ 0.99893147,0.0010685325,1
931
+ 0.05644419,0.94355583,0
932
+ 0.004025738,0.99597424,0
933
+ 0.00069794897,0.999302,0
934
+ 0.48311204,0.51688796,0
935
+ 0.9960265,0.003973484,1
936
+ 0.13559395,0.86440605,0
937
+ 0.00041110907,0.9995889,0
938
+ 0.0011048449,0.99889517,0
939
+ 0.99957246,0.00042754412,1
940
+ 0.99987686,0.0001231432,1
941
+ 0.00051897974,0.999481,0
942
+ 0.0029463405,0.9970537,0
943
+ 0.00076957856,0.99923044,0
944
+ 0.89152277,0.108477235,1
945
+ 0.0004986554,0.99950135,0
946
+ 0.97828615,0.021713853,1
947
+ 0.0070983907,0.9929016,0
948
+ 0.002319099,0.9976809,0
949
+ 0.0041857366,0.99581426,0
950
+ 0.99715984,0.0028401613,1
951
+ 0.9996068,0.00039321184,1
952
+ 0.9714792,0.028520823,1
953
+ 0.0013851725,0.99861485,0
954
+ 0.019722594,0.9802774,0
955
+ 0.9998859,0.00011408329,1
956
+ 0.9997441,0.00025588274,1
957
+ 0.041229405,0.9587706,0
958
+ 0.06628932,0.9337107,0
959
+ 0.002718599,0.9972814,0
960
+ 0.32974356,0.67025644,0
961
+ 0.9937702,0.006229818,1
962
+ 0.0035137467,0.99648625,0
963
+ 0.00043472333,0.9995653,0
964
+ 0.0025935671,0.9974064,0
965
+ 0.0016425685,0.9983574,0
966
+ 0.0030109806,0.996989,0
967
+ 0.00200158,0.9979984,0
968
+ 0.9998505,0.00014948845,1
969
+ 0.99987674,0.0001232624,1
970
+ 0.9958961,0.004103899,1
971
+ 0.9988111,0.0011888742,1
972
+ 0.9997956,0.00020438433,1
973
+ 0.99811256,0.0018874407,1
974
+ 0.0017198748,0.9982801,0
975
+ 0.00093969324,0.99906033,0
976
+ 0.8628573,0.13714272,1
977
+ 0.99978346,0.00021654367,1
978
+ 0.9962877,0.0037122965,1
979
+ 0.0026677295,0.9973323,0
980
+ 0.0047488497,0.9952512,0
981
+ 0.0006212853,0.99937874,0
982
+ 0.001772768,0.99822724,0
983
+ 0.0006938838,0.99930614,0
984
+ 0.99985373,0.0001462698,1
985
+ 0.9997454,0.00025457144,1
986
+ 0.0019583474,0.9980416,0
987
+ 0.9998055,0.00019448996,1
988
+ 0.99977857,0.00022143126,1
989
+ 0.008381903,0.9916181,0
990
+ 0.26681867,0.73318136,0
991
+ 0.99978834,0.0002116561,1
992
+ 0.0014425204,0.9985575,0
993
+ 0.8699408,0.13005918,1
994
+ 0.9487839,0.051216125,1
995
+ 0.06107866,0.93892133,0
996
+ 0.77807987,0.22192013,1
997
+ 0.0013029273,0.9986971,0
998
+ 0.88318485,0.11681515,1
999
+ 0.24346063,0.75653934,0
1000
+ 0.010824579,0.98917544,0
1001
+ 0.98132104,0.018678963,1
1002
+ 0.0004295109,0.9995705,0
1003
+ 0.0006777937,0.99932224,0
1004
+ 0.97983783,0.020162165,1
1005
+ 0.9997626,0.0002374053,1
1006
+ 0.9998635,0.00013649464,1
1007
+ 0.99984527,0.00015473366,1
1008
+ 0.93892294,0.06107706,1
1009
+ 0.00094111694,0.9990589,0
1010
+ 0.9996896,0.000310421,1
1011
+ 0.0018061006,0.9981939,0
1012
+ 0.99983585,0.00016415119,1
1013
+ 0.0005744663,0.99942553,0
1014
+ 0.9998721,0.00012791157,1
1015
+ 0.0052644163,0.9947356,0
1016
+ 0.046919707,0.9530803,0
1017
+ 0.13338585,0.86661416,0
1018
+ 0.9991726,0.0008273721,1
1019
+ 0.9998634,0.00013661385,1
1020
+ 0.9981431,0.0018569231,1
1021
+ 0.9997352,0.00026482344,1
1022
+ 0.99920815,0.0007918477,1
1023
+ 0.99875855,0.0012414455,1
1024
+ 0.9994655,0.00053447485,1
1025
+ 0.0014393745,0.9985606,0
1026
+ 0.9997805,0.0002195239,1
1027
+ 0.9161167,0.083883286,1
1028
+ 0.0008059861,0.999194,0
1029
+ 0.010094708,0.9899053,0
1030
+ 0.00074197387,0.99925804,0
1031
+ 0.00050780573,0.99949217,0
1032
+ 0.0007938607,0.9992061,0
1033
+ 0.9998878,0.00011217594,1
1034
+ 0.016171047,0.98382896,0
1035
+ 0.9998908,0.00010919571,1
1036
+ 0.9998902,0.000109791756,1
1037
+ 0.9329999,0.06700009,1
1038
+ 0.9997906,0.00020939112,1
1039
+ 0.9996402,0.00035977364,1
1040
+ 0.002618646,0.9973813,0
1041
+ 0.99986935,0.00013065338,1
1042
+ 0.010769631,0.9892304,0
1043
+ 0.95059365,0.04940635,1
1044
+ 0.99958426,0.0004157424,1
1045
+ 0.99955255,0.00044745207,1
1046
+ 0.004183877,0.9958161,0
1047
+ 0.99987495,0.00012505054,1
1048
+ 0.020346763,0.97965324,0
1049
+ 0.99900466,0.000995338,1
1050
+ 0.99976414,0.00023585558,1
1051
+ 0.00855446,0.99144554,0
1052
+ 0.99885106,0.0011489391,1
1053
+ 0.98526055,0.014739454,1
1054
+ 0.0047632316,0.99523675,0
1055
+ 0.13477668,0.8652233,0
1056
+ 0.99979216,0.0002078414,1
1057
+ 0.9989642,0.0010358095,1
1058
+ 0.014055643,0.98594433,0
1059
+ 0.00093673257,0.99906325,0
1060
+ 0.024903545,0.97509646,0
1061
+ 0.00037861933,0.9996214,0
1062
+ 0.98970807,0.010291934,1
1063
+ 0.00068686885,0.9993131,0
1064
+ 0.004941081,0.9950589,0
1065
+ 0.9998567,0.00014328957,1
1066
+ 0.9996816,0.000318408,1
1067
+ 0.814943,0.18505698,1
1068
+ 0.99751437,0.002485633,1
1069
+ 0.9368456,0.0631544,1
1070
+ 0.9928894,0.0071105957,1
1071
+ 0.99983156,0.00016844273,1
1072
+ 0.0019478267,0.9980522,0
1073
+ 0.00070620805,0.9992938,0
1074
+ 0.99988544,0.00011456013,1
1075
+ 0.0016910142,0.99830896,0
1076
+ 0.993863,0.0061370134,1
1077
+ 0.99987686,0.0001231432,1
1078
+ 0.00050344766,0.9994966,0
1079
+ 0.9996784,0.00032162666,1
1080
+ 0.9982547,0.0017452836,1
1081
+ 0.9998883,0.000111699104,1
1082
+ 0.0011172291,0.9988828,0
1083
+ 0.0033282782,0.99667174,0
1084
+ 0.15365009,0.8463499,0
1085
+ 0.999356,0.0006440282,1
1086
+ 0.9989506,0.0010493994,1
1087
+ 0.99979013,0.00020986795,1
1088
+ 0.9997656,0.00023442507,1
1089
+ 0.99978215,0.00021785498,1
1090
+ 0.9998368,0.00016319752,1
1091
+ 0.0032640146,0.996736,0
1092
+ 0.000524289,0.9994757,0
1093
+ 0.06716591,0.9328341,0
1094
+ 0.00040406684,0.99959594,0
1095
+ 0.9998216,0.0001783967,1
1096
+ 0.9781617,0.021838307,1
1097
+ 0.025684485,0.9743155,0
1098
+ 0.0022520102,0.997748,0
1099
+ 0.55749655,0.44250345,1
1100
+ 0.9976406,0.0023593903,1
1101
+ 0.92987233,0.070127666,1
1102
+ 0.0007877947,0.9992122,0
1103
+ 0.0007250078,0.99927497,0
1104
+ 0.9990569,0.0009431243,1
1105
+ 0.67327213,0.32672787,1
1106
+ 0.014933303,0.9850667,0
1107
+ 0.00538851,0.9946115,0
1108
+ 0.99958724,0.00041276217,1
1109
+ 0.0084286295,0.99157137,0
1110
+ 0.9994357,0.0005642772,1
1111
+ 0.0005198832,0.9994801,0
1112
+ 0.082494535,0.91750544,0
1113
+ 0.8127193,0.18728071,1
1114
+ 0.999706,0.0002940297,1
1115
+ 0.9993832,0.00061678886,1
1116
+ 0.00060263206,0.9993974,0
1117
+ 0.041682366,0.95831764,0
1118
+ 0.055839956,0.94416004,0
1119
+ 0.009061624,0.99093837,0
1120
+ 0.23380482,0.7661952,0
1121
+ 0.0028321445,0.9971678,0
1122
+ 0.9998373,0.00016272068,1
1123
+ 0.0038410763,0.9961589,0
1124
+ 0.9996867,0.000313282,1
1125
+ 0.038992584,0.9610074,0
1126
+ 0.99987996,0.000120043755,1
1127
+ 0.9997855,0.00021451712,1
1128
+ 0.4841131,0.5158869,0
1129
+ 0.00086596113,0.99913406,0
1130
+ 0.9998186,0.00018137693,1
1131
+ 0.0012129084,0.9987871,0
1132
+ 0.27484408,0.72515595,0
1133
+ 0.047348812,0.9526512,0
1134
+ 0.0011186278,0.9988814,0
1135
+ 0.98457664,0.0154233575,1
1136
+ 0.0044437405,0.99555624,0
1137
+ 0.0013186974,0.9986813,0
1138
+ 0.02009379,0.9799062,0
1139
+ 0.6401105,0.3598895,1
1140
+ 0.00080136437,0.9991986,0
1141
+ 0.00069086277,0.9993091,0
1142
+ 0.7626941,0.23730588,1
1143
+ 0.00085747615,0.9991425,0
1144
+ 0.0122556975,0.98774433,0
1145
+ 0.00045623677,0.9995438,0
1146
+ 0.0007524244,0.99924755,0
1147
+ 0.000909159,0.99909085,0
1148
+ 0.95969266,0.040307343,1
1149
+ 0.99983823,0.000161767,1
1150
+ 0.00069285,0.99930716,0
1151
+ 0.21301623,0.7869838,0
1152
+ 0.9998103,0.00018972158,1
1153
+ 0.9998073,0.00019270182,1
1154
+ 0.023714043,0.97628593,0
1155
+ 0.8223661,0.17763388,1
1156
+ 0.014953063,0.9850469,0
1157
+ 0.003410989,0.996589,0
1158
+ 0.0014916003,0.9985084,0
1159
+ 0.0024160545,0.9975839,0
1160
+ 0.99561065,0.0043893456,1
1161
+ 0.9998431,0.00015687943,1
1162
+ 0.99583095,0.004169047,1
1163
+ 0.04496124,0.9550388,0
1164
+ 0.99861956,0.0013804436,1
1165
+ 0.9996673,0.00033271313,1
1166
+ 0.9997181,0.00028187037,1
1167
+ 0.00087235175,0.9991276,0
1168
+ 0.028256536,0.97174346,0
1169
+ 0.9998503,0.00014972687,1
1170
+ 0.08869008,0.9113099,0
1171
+ 0.9966072,0.0033928156,1
1172
+ 0.0009304818,0.9990695,0
1173
+ 0.035889596,0.9641104,0
1174
+ 0.9992005,0.0007994771,1
1175
+ 0.999801,0.00019901991,1
1176
+ 0.000648822,0.9993512,0
1177
+ 0.009124103,0.9908759,0
1178
+ 0.012377157,0.98762286,0
1179
+ 0.086489685,0.9135103,0
1180
+ 0.00034069165,0.9996593,0
1181
+ 0.019698003,0.980302,0
1182
+ 0.9998934,0.000106573105,1
1183
+ 0.0028126878,0.9971873,0
1184
+ 0.0052378504,0.9947621,0
1185
+ 0.0010660989,0.9989339,0
1186
+ 0.98770255,0.0122974515,1
1187
+ 0.9985154,0.0014845729,1
1188
+ 0.0056083985,0.9943916,0
1189
+ 0.92215335,0.07784665,1
1190
+ 0.99978906,0.00021094084,1
1191
+ 0.999584,0.00041598082,1
1192
+ 0.050261714,0.94973826,0
1193
+ 0.99988985,0.00011014938,1
1194
+ 0.9996803,0.00031971931,1
1195
+ 0.9859671,0.0140329,1
1196
+ 0.017114507,0.9828855,0
1197
+ 0.01527932,0.9847207,0
1198
+ 0.0012601947,0.9987398,0
1199
+ 0.9997423,0.00025767088,1
1200
+ 0.99987984,0.000120162964,1
1201
+ 0.99927634,0.00072366,1
1202
+ 0.0020542203,0.9979458,0
1203
+ 0.0024840469,0.997516,0
1204
+ 0.09163898,0.908361,0
1205
+ 0.017407136,0.9825929,0
1206
+ 0.0007548872,0.9992451,0
1207
+ 0.99940646,0.00059354305,1
1208
+ 0.0070985584,0.99290144,0
1209
+ 0.0032868078,0.9967132,0
1210
+ 0.00048096426,0.99951905,0
1211
+ 0.00085708854,0.9991429,0
1212
+ 0.6825614,0.3174386,1
1213
+ 0.014209282,0.9857907,0
1214
+ 0.9999021,9.787083e-05,1
1215
+ 0.020350575,0.9796494,0
1216
+ 0.012328551,0.98767143,0
1217
+ 0.0017718398,0.99822813,0
1218
+ 0.9850117,0.014988303,1
1219
+ 0.0014757385,0.99852425,0
1220
+ 0.99123996,0.008760035,1
1221
+ 0.094885655,0.90511435,0
1222
+ 0.0018491,0.9981509,0
1223
+ 0.9982035,0.001796484,1
1224
+ 0.9998714,0.00012862682,1
1225
+ 0.9996136,0.0003864169,1
1226
+ 0.995414,0.0045859814,1
1227
+ 0.9992663,0.0007336736,1
1228
+ 0.99987876,0.00012123585,1
1229
+ 0.822752,0.177248,1
1230
+ 0.004088157,0.99591184,0
1231
+ 0.7504031,0.2495969,1
1232
+ 0.98732567,0.012674332,1
1233
+ 0.99986935,0.00013065338,1
1234
+ 0.7112427,0.28875732,1
1235
+ 0.97539186,0.024608135,1
1236
+ 0.9997894,0.00021058321,1
1237
+ 0.0013531825,0.9986468,0
1238
+ 0.99938047,0.0006195307,1
1239
+ 0.99963295,0.0003670454,1
1240
+ 0.13642156,0.86357844,0
1241
+ 0.9833572,0.016642809,1
1242
+ 0.96957725,0.030422747,1
1243
+ 0.99802846,0.0019715428,1
1244
+ 0.99929607,0.00070393085,1
1245
+ 0.69179475,0.30820525,1
1246
+ 0.9983006,0.001699388,1
1247
+ 0.016020698,0.9839793,0
1248
+ 0.0008094693,0.9991905,0
1249
+ 0.07204899,0.927951,0
1250
+ 0.0066855224,0.9933145,0
1251
+ 0.9998072,0.00019282103,1
1252
+ 0.9961349,0.0038651228,1
1253
+ 0.0007047585,0.99929523,0
1254
+ 0.009683632,0.9903164,0
1255
+ 0.0032659478,0.996734,0
1256
+ 0.9986779,0.0013220906,1
1257
+ 0.91074854,0.08925146,1
1258
+ 0.0069067217,0.99309325,0
1259
+ 0.019306092,0.98069394,0
1260
+ 0.0016127066,0.9983873,0
1261
+ 0.008151328,0.99184865,0
1262
+ 0.9989655,0.0010344982,1
1263
+ 0.32452458,0.6754754,0
1264
+ 0.35278073,0.6472193,0
1265
+ 0.9997228,0.0002772212,1
1266
+ 0.99860364,0.001396358,1
1267
+ 0.0026738644,0.99732614,0
1268
+ 0.000673204,0.9993268,0
1269
+ 0.002341402,0.9976586,0
1270
+ 0.9996941,0.00030589104,1
1271
+ 0.9995297,0.00047028065,1
1272
+ 0.9997696,0.00023037195,1
1273
+ 0.94700944,0.052990556,1
1274
+ 0.99984145,0.00015854836,1
1275
+ 0.022548309,0.9774517,0
1276
+ 0.9994073,0.0005927086,1
1277
+ 0.015813189,0.9841868,0
1278
+ 0.57489127,0.42510873,1
1279
+ 0.6667663,0.3332337,1
1280
+ 0.0035003896,0.9964996,0
1281
+ 0.99957925,0.0004207492,1
1282
+ 0.0229167,0.9770833,0
1283
+ 0.00071966054,0.99928033,0
1284
+ 0.9950872,0.0049127936,1
1285
+ 0.9926596,0.0073403716,1
1286
+ 0.9985115,0.0014885068,1
1287
+ 0.99984133,0.00015866756,1
1288
+ 0.9998504,0.00014960766,1
1289
+ 0.09079589,0.9092041,0
1290
+ 0.10645368,0.89354634,0
1291
+ 0.51953757,0.48046243,1
1292
+ 0.0010486891,0.9989513,0
1293
+ 0.042479075,0.9575209,0
1294
+ 0.04028889,0.95971113,0
1295
+ 0.0058548264,0.99414515,0
1296
+ 0.00695637,0.9930436,0
1297
+ 0.9619067,0.03809333,1
1298
+ 0.001363561,0.9986364,0
1299
+ 0.9996642,0.00033581257,1
1300
+ 0.9994894,0.0005105734,1
1301
+ 0.2246372,0.7753628,0
1302
+ 0.9998467,0.00015330315,1
1303
+ 0.9835787,0.016421318,1
1304
+ 0.9970487,0.002951324,1
1305
+ 0.0036778413,0.99632215,0
1306
+ 0.03348522,0.96651477,0
1307
+ 0.00481851,0.9951815,0
1308
+ 0.00064688385,0.9993531,0
1309
+ 0.9929066,0.0070934296,1
1310
+ 0.006865126,0.99313486,0
1311
+ 0.9945786,0.0054214,1
1312
+ 0.001322475,0.99867755,0
1313
+ 0.005048568,0.9949514,0
1314
+ 0.9950303,0.004969716,1
1315
+ 0.041830357,0.95816964,0
1316
+ 0.99989617,0.00010383129,1
1317
+ 0.2020051,0.7979949,0
1318
+ 0.99988675,0.000113248825,1
1319
+ 0.9998727,0.00012731552,1
1320
+ 0.97861177,0.021388233,1
1321
+ 0.0023054148,0.9976946,0
1322
+ 0.9995945,0.0004054904,1
1323
+ 0.00041710577,0.9995829,0
1324
+ 0.0032137812,0.99678624,0
1325
+ 0.99981934,0.00018066168,1
1326
+ 0.806486,0.19351399,1
1327
+ 0.00068348023,0.9993165,0
1328
+ 0.01681662,0.9831834,0
1329
+ 0.026612433,0.97338754,0
1330
+ 0.0010068077,0.9989932,0
1331
+ 0.0020133855,0.9979866,0
1332
+ 0.66372603,0.33627397,1
1333
+ 0.00034197184,0.99965805,0
1334
+ 0.9998847,0.00011527538,1
1335
+ 0.9996729,0.0003271103,1
1336
+ 0.8478253,0.15217471,1
1337
+ 0.99976474,0.00023525953,1
1338
+ 0.0023019821,0.997698,0
1339
+ 0.9993656,0.00063437223,1
1340
+ 0.0009189056,0.9990811,0
1341
+ 0.000970797,0.9990292,0
1342
+ 0.9991966,0.000803411,1
1343
+ 0.0025322684,0.99746776,0
1344
+ 0.99986756,0.00013244152,1
1345
+ 0.99889946,0.0011005402,1
1346
+ 0.9998592,0.00014078617,1
1347
+ 0.0031590539,0.99684095,0
1348
+ 0.99502003,0.004979968,1
1349
+ 0.9997688,0.00023120642,1
1350
+ 0.004636773,0.99536324,0
1351
+ 0.99622285,0.0037771463,1
1352
+ 0.99975306,0.00024694204,1
1353
+ 0.95300466,0.04699534,1
1354
+ 0.0007207516,0.99927926,0
1355
+ 0.99975353,0.0002464652,1
1356
+ 0.0035972926,0.9964027,0
1357
+ 0.0016834488,0.9983165,0
1358
+ 0.9633366,0.036663413,1
1359
+ 0.008187345,0.99181265,0
1360
+ 0.99904734,0.00095266104,1
1361
+ 0.0010455247,0.9989545,0
1362
+ 0.9274769,0.07252312,1
1363
+ 0.99818283,0.0018171668,1
1364
+ 0.17862533,0.82137465,0
1365
+ 0.99910057,0.0008994341,1
1366
+ 0.9998895,0.00011050701,1
1367
+ 0.9995993,0.00040072203,1
1368
+ 0.99984765,0.00015234947,1
1369
+ 0.99988735,0.00011265278,1
1370
+ 0.99984336,0.000156641,1
1371
+ 0.99966836,0.00033164024,1
1372
+ 0.4243909,0.5756091,0
1373
+ 0.0045117154,0.9954883,0
1374
+ 0.0016531252,0.99834687,0
1375
+ 0.9998437,0.00015628338,1
1376
+ 0.99966,0.0003399849,1
1377
+ 0.009621832,0.99037814,0
1378
+ 0.99935955,0.0006404519,1
1379
+ 0.15945785,0.84054214,0
1380
+ 0.99979573,0.00020426512,1
1381
+ 0.009892495,0.9901075,0
1382
+ 0.9991835,0.000816524,1
1383
+ 0.9976891,0.002310872,1
1384
+ 0.9997811,0.00021892786,1
1385
+ 0.99836284,0.0016371608,1
1386
+ 0.9044741,0.09552592,1
1387
+ 0.021106085,0.97889394,0
1388
+ 0.0009765098,0.9990235,0
1389
+ 0.9973163,0.0026836991,1
1390
+ 0.0009045937,0.9990954,0
1391
+ 0.99569005,0.0043099523,1
1392
+ 0.9996959,0.0003041029,1
1393
+ 0.9892446,0.01075542,1
1394
+ 0.003932632,0.99606735,0
1395
+ 0.9995259,0.00047409534,1
1396
+ 0.99975616,0.0002438426,1
1397
+ 0.0008234155,0.99917656,0
1398
+ 0.019701503,0.9802985,0
1399
+ 0.99966097,0.00033903122,1
1400
+ 0.9993038,0.00069618225,1
1401
+ 0.036458172,0.9635418,0
1402
+ 0.999858,0.00014197826,1
1403
+ 0.00085888465,0.9991411,0
1404
+ 0.00046995457,0.99953,0
1405
+ 0.036033507,0.9639665,0
1406
+ 0.9998437,0.00015628338,1
1407
+ 0.022376027,0.977624,0
1408
+ 0.9997533,0.00024670362,1
1409
+ 0.9998665,0.0001335144,1
1410
+ 0.0019861858,0.9980138,0
1411
+ 0.9998665,0.0001335144,1
1412
+ 0.0032734273,0.9967266,0
1413
+ 0.99989164,0.000108361244,1
1414
+ 0.010293513,0.9897065,0
1415
+ 0.9848646,0.015135407,1
1416
+ 0.04890146,0.95109856,0
1417
+ 0.9998642,0.00013577938,1
1418
+ 0.0004259061,0.99957407,0
1419
+ 0.00045915032,0.99954087,0
1420
+ 0.0019283749,0.9980716,0
1421
+ 0.9998889,0.00011110306,1
1422
+ 0.9998752,0.00012481213,1
1423
+ 0.9993268,0.00067317486,1
1424
+ 0.0012653967,0.9987346,0
1425
+ 0.0056609632,0.99433905,0
1426
+ 0.9497939,0.050206125,1
1427
+ 0.6338669,0.3661331,1
1428
+ 0.0067454083,0.9932546,0
1429
+ 0.99976677,0.00023323298,1
1430
+ 0.9995289,0.0004711151,1
1431
+ 0.0019593746,0.9980406,0
1432
+ 0.0015933962,0.9984066,0
1433
+ 0.9997701,0.00022989511,1
1434
+ 0.25864217,0.7413578,0
1435
+ 0.0024167523,0.99758327,0
1436
+ 0.035206456,0.96479356,0
1437
+ 0.9993863,0.0006136894,1
1438
+ 0.99976736,0.00023263693,1
1439
+ 0.0021802525,0.9978197,0
1440
+ 0.95753574,0.042464256,1
1441
+ 0.99982494,0.00017505884,1
1442
+ 0.999741,0.00025898218,1
1443
+ 0.9998293,0.0001707077,1
1444
+ 0.014395516,0.98560447,0
1445
+ 0.999574,0.0004259944,1
1446
+ 0.88353956,0.11646044,1
1447
+ 0.96972275,0.030277252,1
1448
+ 0.9980209,0.0019791126,1
1449
+ 0.049169924,0.9508301,0
1450
+ 0.9998204,0.0001795888,1
1451
+ 0.00047030477,0.9995297,0
1452
+ 0.94815695,0.051843047,1
1453
+ 0.01274481,0.9872552,0
1454
+ 0.04547661,0.9545234,0
1455
+ 0.99976593,0.00023406744,1
1456
+ 0.9995808,0.00041919947,1
1457
+ 0.001992908,0.9980071,0
1458
+ 0.99981385,0.0001861453,1
1459
+ 0.99952626,0.00047373772,1
1460
+ 0.46631286,0.5336871,0
1461
+ 0.002193784,0.9978062,0
1462
+ 0.9995895,0.0004104972,1
1463
+ 0.9992016,0.0007984042,1
1464
+ 0.0004465854,0.99955344,0
1465
+ 0.004318758,0.9956812,0
1466
+ 0.99981207,0.00018793344,1
1467
+ 0.0008964967,0.9991035,0
1468
+ 0.18074627,0.81925374,0
1469
+ 0.62929094,0.37070906,1
1470
+ 0.0009246992,0.9990753,0
1471
+ 0.999826,0.00017398596,1
1472
+ 0.0014277773,0.99857223,0
1473
+ 0.9997905,0.00020951033,1
1474
+ 0.0009916759,0.9990083,0
1475
+ 0.001873005,0.998127,0
1476
+ 0.00072076276,0.99927926,0
1477
+ 0.9998889,0.00011110306,1
1478
+ 0.0032118382,0.99678814,0
1479
+ 0.9998901,0.000109910965,1
1480
+ 0.9667485,0.033251524,1
1481
+ 0.021340137,0.97865987,0
1482
+ 0.002107285,0.99789274,0
1483
+ 0.83981794,0.16018206,1
1484
+ 0.99983156,0.00016844273,1
1485
+ 0.9998739,0.00012612343,1
1486
+ 0.04919543,0.9508046,0
1487
+ 0.6831163,0.31688368,1
1488
+ 0.00444778,0.99555224,0
1489
+ 0.99974626,0.00025373697,1
1490
+ 0.008707594,0.9912924,0
1491
+ 0.99029166,0.009708345,1
1492
+ 0.9998692,0.00013077259,1
1493
+ 0.0022228453,0.99777716,0
1494
+ 0.99598736,0.0040126443,1
1495
+ 0.00052444637,0.99947554,0
1496
+ 0.013174158,0.9868258,0
1497
+ 0.9811844,0.018815577,1
1498
+ 0.22822694,0.77177304,0
1499
+ 0.9995353,0.0004646778,1
1500
+ 0.9998673,0.00013267994,1
1501
+ 0.9998801,0.000119924545,1
1502
+ 0.00083288655,0.9991671,0
1503
+ 0.019334035,0.980666,0
1504
+ 0.9988438,0.0011562109,1
1505
+ 0.99943715,0.00056284666,1
1506
+ 0.0042960695,0.99570394,0
1507
+ 0.00052439637,0.9994756,0
1508
+ 0.0010083526,0.99899167,0
1509
+ 0.0010906205,0.99890935,0
1510
+ 0.99986255,0.00013744831,1
1511
+ 0.9998252,0.00017482042,1
1512
+ 0.9995146,0.00048542023,1
1513
+ 0.000731219,0.9992688,0
1514
+ 0.0052024093,0.9947976,0
1515
+ 0.9964541,0.0035458803,1
1516
+ 0.9998543,0.00014567375,1
1517
+ 0.00059040025,0.9994096,0
1518
+ 0.99983203,0.00016796589,1
1519
+ 0.99685717,0.0031428337,1
1520
+ 0.00072333636,0.99927664,0
1521
+ 0.99976724,0.00023275614,1
1522
+ 0.0024889186,0.9975111,0
1523
+ 0.99988365,0.00011634827,1
1524
+ 0.0022025735,0.9977974,0
1525
+ 0.0022719945,0.997728,0
1526
+ 0.99985754,0.0001424551,1
1527
+ 0.9973937,0.0026062727,1
1528
+ 0.0023864168,0.9976136,0
1529
+ 0.17679791,0.8232021,0
1530
+ 0.0005216321,0.99947834,0
1531
+ 0.99859256,0.0014074445,1
1532
+ 0.0008994314,0.99910057,0
1533
+ 0.99478585,0.0052141547,1
1534
+ 0.99979335,0.0002066493,1
1535
+ 0.12942544,0.8705746,0
1536
+ 0.99986136,0.0001386404,1
1537
+ 0.11517099,0.884829,0
1538
+ 0.9998294,0.0001705885,1
1539
+ 0.99895954,0.0010404587,1
1540
+ 0.99613273,0.0038672686,1
1541
+ 0.99983025,0.00016975403,1
1542
+ 0.00040963385,0.99959034,0
1543
+ 0.9977992,0.0022007823,1
1544
+ 0.9739179,0.026082098,1
1545
+ 0.004345853,0.99565417,0
1546
+ 0.006053713,0.9939463,0
1547
+ 0.0016791263,0.9983209,0
1548
+ 0.9913675,0.008632481,1
1549
+ 0.0046222447,0.9953778,0
1550
+ 0.0013940433,0.99860597,0
1551
+ 0.0015913546,0.9984087,0
1552
+ 0.0059807864,0.9940192,0
1553
+ 0.0026462497,0.99735373,0
1554
+ 0.9998692,0.00013077259,1
1555
+ 0.9995863,0.00041371584,1
1556
+ 0.99978966,0.00021034479,1
1557
+ 0.00043088966,0.9995691,0
1558
+ 0.015253298,0.9847467,0
1559
+ 0.585622,0.414378,1
1560
+ 0.0020175942,0.9979824,0
1561
+ 0.37034228,0.62965775,0
1562
+ 0.00040779563,0.9995922,0
1563
+ 0.0028202974,0.9971797,0
1564
+ 0.9996939,0.00030612946,1
1565
+ 0.9996094,0.00039058924,1
1566
+ 0.004077784,0.9959222,0
1567
+ 0.99977237,0.00022763014,1
1568
+ 0.0011807196,0.9988193,0
1569
+ 0.994825,0.0051749945,1
1570
+ 0.99032634,0.009673655,1
1571
+ 0.9994097,0.0005903244,1
1572
+ 0.00067349593,0.9993265,0
1573
+ 0.9995447,0.00045531988,1
1574
+ 0.9998838,0.00011622906,1
1575
+ 0.9998543,0.00014567375,1
1576
+ 0.99897075,0.001029253,1
1577
+ 0.87280744,0.12719256,1
1578
+ 0.9998425,0.00015747547,1
1579
+ 0.95597947,0.044020534,1
1580
+ 0.008097042,0.99190295,0
1581
+ 0.9998041,0.00019592047,1
1582
+ 0.9998054,0.00019460917,1
1583
+ 0.99949026,0.0005097389,1
1584
+ 0.032233093,0.9677669,0
1585
+ 0.99981385,0.0001861453,1
1586
+ 0.99971753,0.0002824664,1
1587
+ 0.0052396143,0.9947604,0
1588
+ 0.9983871,0.0016129017,1
1589
+ 0.99990296,9.703636e-05,1
1590
+ 0.0013588495,0.99864113,0
1591
+ 0.0007022909,0.99929774,0
1592
+ 0.0027055147,0.9972945,0
1593
+ 0.021917118,0.9780829,0
1594
+ 0.9978259,0.0021740794,1
1595
+ 0.99981207,0.00018793344,1
1596
+ 0.9998267,0.0001732707,1
1597
+ 0.99980265,0.00019735098,1
1598
+ 0.9986016,0.0013983846,1
1599
+ 0.999642,0.0003579855,1
1600
+ 0.98986393,0.010136068,1
1601
+ 0.0004083554,0.99959165,0
1602
+ 0.0344822,0.9655178,0
1603
+ 0.005193786,0.99480623,0
1604
+ 0.99988747,0.00011253357,1
1605
+ 0.039941236,0.96005875,0
1606
+ 0.0023187317,0.99768126,0
1607
+ 0.99231285,0.0076871514,1
1608
+ 0.9996952,0.00030481815,1
1609
+ 0.0028359822,0.997164,0
1610
+ 0.9998098,0.00019019842,1
1611
+ 0.7141641,0.28583592,1
1612
+ 0.0009670136,0.999033,0
1613
+ 0.9998282,0.00017178059,1
1614
+ 0.009079368,0.9909206,0
1615
+ 0.99857986,0.0014201403,1
1616
+ 0.99903536,0.0009646416,1
1617
+ 0.0004929101,0.99950707,0
1618
+ 0.03476164,0.96523833,0
1619
+ 0.9928341,0.007165909,1
1620
+ 0.000879576,0.9991204,0
1621
+ 0.01936689,0.98063314,0
1622
+ 0.77292895,0.22707105,1
1623
+ 0.99988437,0.00011563301,1
1624
+ 0.0005537447,0.9994463,0
1625
+ 0.9998233,0.00017672777,1
1626
+ 0.10483965,0.8951603,0
1627
+ 0.0010610862,0.9989389,0
1628
+ 0.0015107063,0.9984893,0
1629
+ 0.67206246,0.32793754,1
1630
+ 0.74160254,0.25839746,1
1631
+ 0.00049924443,0.99950075,0
1632
+ 0.99063617,0.00936383,1
1633
+ 0.9982651,0.0017349124,1
1634
+ 0.0157435,0.9842565,0
1635
+ 0.99986994,0.00013005733,1
1636
+ 0.999887,0.00011301041,1
1637
+ 0.0031489774,0.996851,0
1638
+ 0.9998646,0.00013542175,1
1639
+ 0.99988425,0.00011575222,1
1640
+ 0.9998273,0.00017267466,1
1641
+ 0.9812774,0.018722594,1
1642
+ 0.009081338,0.99091864,0
1643
+ 0.00917657,0.99082345,0
1644
+ 0.0022112338,0.9977888,0
1645
+ 0.99817,0.0018299818,1
1646
+ 0.01771771,0.9822823,0
1647
+ 0.0018025974,0.9981974,0
1648
+ 0.014842669,0.9851573,0
1649
+ 0.014159287,0.98584074,0
1650
+ 0.0066198,0.9933802,0
1651
+ 0.99956983,0.00043016672,1
1652
+ 0.9895349,0.0104650855,1
1653
+ 0.0006335138,0.99936646,0
1654
+ 0.0024663648,0.9975336,0
1655
+ 0.0017252702,0.99827474,0
1656
+ 0.0059876703,0.99401236,0
1657
+ 0.8959277,0.10407227,1
1658
+ 0.9997004,0.00029957294,1
1659
+ 0.009042565,0.99095744,0
1660
+ 0.0006638254,0.9993362,0
1661
+ 0.001218552,0.99878144,0
1662
+ 0.9997781,0.00022190809,1
1663
+ 0.9948279,0.005172074,1
1664
+ 0.009048061,0.99095196,0
1665
+ 0.99984646,0.00015354156,1
1666
+ 0.05151747,0.9484825,0
1667
+ 0.99981743,0.00018256903,1
1668
+ 0.999345,0.00065499544,1
1669
+ 0.001062235,0.9989378,0
1670
+ 0.99916613,0.000833869,1
1671
+ 0.00053035404,0.99946964,0
1672
+ 0.0024746575,0.99752533,0
1673
+ 0.0011304434,0.99886954,0
1674
+ 0.05968584,0.9403142,0
1675
+ 0.99934095,0.00065904856,1
1676
+ 0.99989784,0.00010216236,1
1677
+ 0.0008041534,0.9991959,0
1678
+ 0.0006992785,0.9993007,0
1679
+ 0.9998549,0.0001450777,1
1680
+ 0.9998658,0.00013422966,1
1681
+ 0.19615054,0.80384946,0
1682
+ 0.9994803,0.0005196929,1
1683
+ 0.99589264,0.004107356,1
1684
+ 0.20370334,0.79629666,0
1685
+ 0.0007599002,0.9992401,0
1686
+ 0.9995322,0.00046777725,1
1687
+ 0.999059,0.0009409785,1
1688
+ 0.051172286,0.94882774,0
1689
+ 0.9872177,0.012782276,1
1690
+ 0.9995577,0.00044232607,1
1691
+ 0.03710999,0.96289,0
1692
+ 0.98343915,0.016560853,1
1693
+ 0.00073656125,0.99926347,0
1694
+ 0.0007468811,0.9992531,0
1695
+ 0.0013061495,0.9986938,0
1696
+ 0.7225132,0.2774868,1
1697
+ 0.005108332,0.99489164,0
1698
+ 0.0013259565,0.99867404,0
1699
+ 0.0056180866,0.9943819,0
1700
+ 0.740244,0.25975603,1
1701
+ 0.9988335,0.0011665225,1
1702
+ 0.99988246,0.00011754036,1
1703
+ 0.0010891542,0.99891084,0
1704
+ 0.99990666,9.3340874e-05,1
1705
+ 0.0057447064,0.9942553,0
1706
+ 0.9997912,0.00020879507,1
1707
+ 0.99980456,0.00019544363,1
1708
+ 0.3959574,0.6040426,0
1709
+ 0.0017516603,0.99824834,0
1710
+ 0.00076079025,0.9992392,0
1711
+ 0.9998977,0.00010228157,1
1712
+ 0.011712565,0.98828745,0
1713
+ 0.85288054,0.14711946,1
1714
+ 0.9998184,0.00018161535,1
1715
+ 0.9998816,0.000118374825,1
1716
+ 0.022444513,0.9775555,0
1717
+ 0.99986136,0.0001386404,1
1718
+ 0.0013287985,0.9986712,0
1719
+ 0.110791,0.88920903,0
1720
+ 0.008713931,0.99128604,0
1721
+ 0.32239056,0.67760944,0
1722
+ 0.0021325499,0.99786747,0
1723
+ 0.9998876,0.00011241436,1
1724
+ 0.030178083,0.96982193,0
1725
+ 0.011832474,0.9881675,0
1726
+ 0.7948421,0.20515788,1
1727
+ 0.0011315657,0.9988684,0
1728
+ 0.9996877,0.00031232834,1
1729
+ 0.0004355039,0.99956447,0
1730
+ 0.003961822,0.9960382,0
1731
+ 0.8305101,0.16948992,1
1732
+ 0.002312403,0.9976876,0
1733
+ 0.9602684,0.03973162,1
1734
+ 0.0032133197,0.99678665,0
1735
+ 0.0026589101,0.9973411,0
1736
+ 0.029958015,0.970042,0
1737
+ 0.40355667,0.5964433,0
1738
+ 0.003470913,0.9965291,0
1739
+ 0.99978405,0.00021594763,1
1740
+ 0.0018896591,0.99811035,0
1741
+ 0.9903031,0.009696901,1
1742
+ 0.99547297,0.0045270324,1
1743
+ 0.0021258756,0.99787414,0
1744
+ 0.99496114,0.0050388575,1
1745
+ 0.0022380084,0.99776196,0
1746
+ 0.00038595285,0.99961406,0
1747
+ 0.008375573,0.9916244,0
1748
+ 0.9998975,0.00010251999,1
1749
+ 0.002578058,0.9974219,0
1750
+ 0.9998385,0.00016152859,1
1751
+ 0.0011060521,0.998894,0
1752
+ 0.023686605,0.9763134,0
1753
+ 0.99854267,0.0014573336,1
1754
+ 0.9977558,0.0022441745,1
1755
+ 0.7420208,0.2579792,1
1756
+ 0.0025838136,0.9974162,0
1757
+ 0.01618608,0.98381394,0
1758
+ 0.9943106,0.0056893826,1
1759
+ 0.99988806,0.00011193752,1
1760
+ 0.99978215,0.00021785498,1
1761
+ 0.00076428,0.99923575,0
1762
+ 0.99545693,0.004543066,1
1763
+ 0.0016348206,0.99836516,0
1764
+ 0.9997873,0.00021272898,1
1765
+ 0.0004296247,0.99957037,0
1766
+ 0.0003620885,0.9996379,0
1767
+ 0.9997514,0.00024861097,1
1768
+ 0.0076970495,0.99230295,0
1769
+ 0.0010765801,0.9989234,0
1770
+ 0.0008869873,0.999113,0
1771
+ 0.008413542,0.99158645,0
1772
+ 0.0020842291,0.99791574,0
1773
+ 0.99972206,0.00027793646,1
1774
+ 0.9998946,0.00010538101,1
1775
+ 0.9998784,0.000121593475,1
1776
+ 0.9997501,0.00024992228,1
1777
+ 0.9996068,0.00039321184,1
1778
+ 0.0043077674,0.99569225,0
1779
+ 0.9996991,0.00030088425,1
1780
+ 0.9986576,0.0013424158,1
1781
+ 0.3606295,0.6393705,0
1782
+ 0.0005459426,0.9994541,0
1783
+ 0.99983907,0.00016093254,1
1784
+ 0.7244799,0.2755201,1
1785
+ 0.9998336,0.00016641617,1
1786
+ 0.013501611,0.9864984,0
1787
+ 0.9998528,0.00014722347,1
1788
+ 0.5574331,0.44256687,1
1789
+ 0.00156936,0.99843067,0
1790
+ 0.9997806,0.0002194047,1
1791
+ 0.011553483,0.98844653,0
1792
+ 0.9999,0.000100016594,1
1793
+ 0.007280226,0.99271977,0
1794
+ 0.00089651474,0.9991035,0
1795
+ 0.99988997,0.000110030174,1
1796
+ 0.9998317,0.00016832352,1
1797
+ 0.9995932,0.0004068017,1
1798
+ 0.0007864607,0.9992135,0
1799
+ 0.9998653,0.0001347065,1
1800
+ 0.9997745,0.00022548437,1
1801
+ 0.99988425,0.00011575222,1
1802
+ 0.078055434,0.92194456,0
1803
+ 0.0014224586,0.99857754,0
1804
+ 0.00041620075,0.9995838,0
1805
+ 0.99979824,0.00020176172,1
1806
+ 0.2257457,0.7742543,0
1807
+ 0.999411,0.0005890131,1
1808
+ 0.95687616,0.04312384,1
1809
+ 0.39864674,0.6013533,0
1810
+ 0.00034699676,0.999653,0
1811
+ 0.0050702714,0.99492973,0
1812
+ 0.00055140024,0.9994486,0
1813
+ 0.9998996,0.00010037422,1
1814
+ 0.9408695,0.05913049,1
1815
+ 0.0028980495,0.99710196,0
1816
+ 0.9997454,0.00025457144,1
1817
+ 0.0013525377,0.99864745,0
1818
+ 0.008645632,0.99135435,0
1819
+ 0.0008496059,0.9991504,0
1820
+ 0.0025813633,0.99741864,0
1821
+ 0.0024583698,0.9975416,0
1822
+ 0.0010341529,0.99896586,0
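
The prediction CSVs above (res/output/test_prediction_epoch_*.csv) carry three unlabeled values per row. Judging from the rows themselves, the label in the third column is 1 exactly when the first value dominates, so the columns appear to be P(class 1), P(class 0), and the predicted label; that reading is an assumption, as are the file path and column names in this minimal pandas sanity-check sketch:

import pandas as pd

# Hypothetical column names; the CSV has no header row.
df = pd.read_csv("res/output/test_prediction_epoch_3.csv", header=None,
                 names=["p1", "p0", "label"])
# The two probabilities in each row should sum to ~1, and the stored label
# should agree with the argmax over them.
print((df["p1"] + df["p0"]).describe())
mismatches = ((df["p1"] > df["p0"]).astype(int) != df["label"]).sum()
print("rows where label != argmax:", mismatches)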
examples/AutoEAP_UMI-STARR-seq/Baseline/config/config-conv-117.json ADDED
@@ -0,0 +1,22 @@
1
+ {
2
+ "batch_size": 64,
3
+ "encode": "one-hot",
4
+ "epochs": 100,
5
+ "early_stop": 20,
6
+ "lr": 0.001,
7
+ "convolution_layers": {
8
+ "n_layers": 4,
9
+ "filters": [1024, 512, 256, 128],
10
+ "kernel_sizes": [8, 16, 32, 64]
11
+ },
12
+ "transformer_layers": {
13
+ "n_layers": 0,
14
+ "attn_key_dim": [16, 16, 16],
15
+ "attn_heads": [2048, 2048, 2048]
16
+ },
17
+ "n_dense_layer": 1,
18
+ "dense_neurons1": 64,
19
+ "dropout_conv": "yes",
20
+ "dropout_prob": 0.4,
21
+ "pad": "same"
22
+ }
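
The config above is consumed by the model-building loop in experiment.py below: each of the n_layers convolution blocks takes one entry from filters and kernel_sizes, and with transformer_layers.n_layers set to 0 the attention fields are inert. A minimal standalone sketch (not part of the repo) that loads the file and checks those invariants:

import json

with open("config/config-conv-117.json") as fh:
    params = json.load(fh)

conv = params["convolution_layers"]
# Each configured layer must have a matching filter count and kernel size.
assert len(conv["filters"]) >= conv["n_layers"]
assert len(conv["kernel_sizes"]) >= conv["n_layers"]
for i in range(conv["n_layers"]):
    print(f"Conv1D_{i + 1}: {conv['filters'][i]} filters, kernel size {conv['kernel_sizes'][i]}")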
examples/AutoEAP_UMI-STARR-seq/Baseline/experiment.py ADDED
@@ -0,0 +1,206 @@
1
+ # adapted from the DeepSTARR colab notebook: https://colab.research.google.com/drive/1Xgak40TuxWWLh5P5ARf0-4Xo0BcRn0Gd
2
+
3
+ import argparse
4
+ import os
5
+ import sys
6
+ import time
7
+ import traceback
8
+ import sklearn
9
+ import json
10
+ import tensorflow as tf
11
+ import keras
12
+ import keras_nlp
13
+ import keras.layers as kl
14
+ from keras.layers import Conv1D, MaxPooling1D, AveragePooling1D
15
+ from keras_nlp.layers import SinePositionEncoding, TransformerEncoder
16
+ from keras.layers import BatchNormalization
17
+ from keras.models import Sequential, Model, load_model
18
+ from keras.optimizers import Adam
19
+ from keras.callbacks import EarlyStopping, History, ModelCheckpoint
20
+ import pandas as pd
21
+ import numpy as np
22
+ import matplotlib.pyplot as plt
23
+ import seaborn as sns
24
+ from scipy import stats
25
+ from collections import Counter
26
+ from itertools import product
27
+ from sklearn.metrics import mean_squared_error
28
+
29
+ startTime=time.time()
30
+
31
+ os.environ["CUDA_VISIBLE_DEVICES"] = "0"
32
+
33
+ def parse_arguments():
34
+ parser = argparse.ArgumentParser(description='DeepSTARR')
35
+ parser.add_argument('--config', type=str, default='config/config-conv-117.json', help='Configuration file path (default: config/config-conv-117.json)')
36
+ parser.add_argument('--indir', type=str, default='./DeepSTARR-Reimplementation-main/data/Sequences_activity_all.txt', help='Input data directory (default: ./DeepSTARR-Reimplementation-main/data/Sequences_activity_all.txt)')
37
+ parser.add_argument('--out_dir', type=str, default='output', help='Output directory (default: output)')
38
+ parser.add_argument('--label', type=str, default='baseline', help='Output label (default: baseline)')
39
+ return parser.parse_args()
40
+
41
+ def LoadConfig(config):
42
+ with open(config, 'r') as file:
43
+ params = json.load(file)
44
+ return params
45
+
46
+ def one_hot_encode(seq):
47
+ nucleotide_dict = {'A': [1, 0, 0, 0],
48
+ 'C': [0, 1, 0, 0],
49
+ 'G': [0, 0, 1, 0],
50
+ 'T': [0, 0, 0, 1],
51
+ 'N': [0, 0, 0, 0]}
52
+ return np.array([nucleotide_dict[nuc] for nuc in seq])
53
+
54
+ def kmer_encode(sequence, k=3):
55
+ sequence = sequence.upper()
56
+ kmers = [sequence[i:i+k] for i in range(len(sequence) - k + 1)]
57
+ kmer_counts = Counter(kmers)
58
+ return {kmer: kmer_counts.get(kmer, 0) / len(kmers) for kmer in [''.join(p) for p in product('ACGT', repeat=k)]}
59
+
60
+ def kmer_features(seq, k=3):
61
+ all_kmers = [''.join(p) for p in product('ACGT', repeat=k)]
62
+ feature_matrix = []
63
+ kmer_freqs = kmer_encode(seq, k)
64
+ feature_vector = [kmer_freqs[kmer] for kmer in all_kmers]
65
+ feature_matrix.append(feature_vector)
66
+ return np.array(feature_matrix)
67
+
68
+ def prepare_input(data_set, params):
69
+ if params['encode'] == 'one-hot':
70
+ seq_matrix = np.array(data_set['Sequence'].apply(one_hot_encode).tolist()) # (number of sequences, length of sequences, nucleotides)
71
+ elif params['encode'] == 'k-mer':
72
+ seq_matrix = np.array(data_set['Sequence'].apply(kmer_features, k=3).tolist()) # (number of sequences, 1, 4^k)
73
+ else:
74
+ raise ValueError(f"unknown encoding method: {params['encode']}")
75
+
76
+ Y_dev = data_set.Dev_log2_enrichment
77
+ Y_hk = data_set.Hk_log2_enrichment
78
+ Y = [Y_dev, Y_hk]
79
+
80
+ return seq_matrix, Y
81
+
82
+ def DeepSTARR(params):
83
+ if params['encode'] == 'one-hot':
84
+ input = kl.Input(shape=(249, 4))
85
+ elif params['encode'] == 'k-mer':
86
+ input = kl.Input(shape=(1, 64))
87
+
88
+ for i in range(params['convolution_layers']['n_layers']):
89
+ x = kl.Conv1D(params['convolution_layers']['filters'][i],
90
+ kernel_size = params['convolution_layers']['kernel_sizes'][i],
91
+ padding = params['pad'],
92
+ name=str('Conv1D_'+str(i+1)))(input if i == 0 else x)  # first layer reads the encoded input; later layers stack on x
93
+ x = kl.BatchNormalization()(x)
94
+ x = kl.Activation('relu')(x)
95
+ if params['encode'] == 'one-hot':
96
+ x = kl.MaxPooling1D(2)(x)
97
+
98
+ if params['dropout_conv'] == 'yes': x = kl.Dropout(params['dropout_prob'])(x)
99
+
100
+ # optional attention layers
101
+ for i in range(params['transformer_layers']['n_layers']):
102
+ if i == 0:
103
+ x = x + keras_nlp.layers.SinePositionEncoding()(x)
104
+ x = TransformerEncoder(intermediate_dim = params['transformer_layers']['attn_key_dim'][i],
105
+ num_heads = params['transformer_layers']['attn_heads'][i],
106
+ dropout = params['dropout_prob'])(x)
107
+
108
+ # After the convolutional layers, the output is flattened and passed through a series of fully connected/dense layers
109
+ # Flattening converts the multi-dimensional convolutional output into a one-dimensional array that feeds the fully connected layers
110
+ x = kl.Flatten()(x)
111
+
112
+ # Fully connected layers
113
+ # Each fully connected layer is followed by batch normalization, ReLU activation, and dropout
114
+ for i in range(params['n_dense_layer']):
115
+ x = kl.Dense(params['dense_neurons'+str(i+1)],
116
+ name=str('Dense_'+str(i+1)))(x)
117
+ x = kl.BatchNormalization()(x)
118
+ x = kl.Activation('relu')(x)
119
+ x = kl.Dropout(params['dropout_prob'])(x)
120
+
121
+ # Main model bottleneck
122
+ bottleneck = x
123
+
124
+ # heads per task (developmental and housekeeping enhancer activities)
125
+ # The final output layer is a pair of dense layers, one for each task (developmental and housekeeping enhancer activities), each with a single neuron and a linear activation function
126
+ tasks = ['Dev', 'Hk']
127
+ outputs = []
128
+ for task in tasks:
129
+ outputs.append(kl.Dense(1, activation='linear', name=str('Dense_' + task))(bottleneck))
130
+
131
+ # Build Keras model object
132
+ model = Model([input], outputs)
133
+ model.compile(Adam(learning_rate=params['lr']), # Adam optimizer
134
+ loss=['mse', 'mse'], # loss is Mean Squared Error (MSE)
135
+ loss_weights=[1, 1]) # in case we want to weight the two outputs differently; for now both use the same weight
136
+
137
+ return model, params
138
+
139
+ def train(selected_model, X_train, Y_train, X_valid, Y_valid, params):
140
+ my_history=selected_model.fit(X_train, Y_train,
141
+ validation_data=(X_valid, Y_valid),
142
+ batch_size=params['batch_size'],
143
+ epochs=params['epochs'],
144
+ callbacks=[EarlyStopping(patience=params['early_stop'], monitor="val_loss", restore_best_weights=True), History()])
145
+
146
+ return selected_model, my_history
147
+
148
+ def summary_statistics(X, Y, set, task, main_model, main_params, out_dir):
149
+ pred = main_model.predict(X, batch_size=main_params['batch_size']) # predict
150
+ if task =="Dev":
151
+ i=0
152
+ if task =="Hk":
153
+ i=1
154
+ print(set + ' MSE ' + task + ' = ' + str("{0:0.2f}".format(mean_squared_error(Y, pred[i].squeeze()))))
155
+ print(set + ' PCC ' + task + ' = ' + str("{0:0.2f}".format(stats.pearsonr(Y, pred[i].squeeze())[0])))
156
+ print(set + ' SCC ' + task + ' = ' + str("{0:0.2f}".format(stats.spearmanr(Y, pred[i].squeeze())[0])))
157
+ return str("{0:0.2f}".format(stats.pearsonr(Y, pred[i].squeeze())[0]))
158
+
159
+ def main(config, indir, out_dir, label):
160
+ data = pd.read_table(indir)
161
+ params = LoadConfig(config)
162
+
163
+ X_train, Y_train = prepare_input(data[data['set'] == "Train"], params)
164
+ X_valid, Y_valid = prepare_input(data[data['set'] == "Val"], params)
165
+ X_test, Y_test = prepare_input(data[data['set'] == "Test"], params)
166
+
167
+ # build the model once and print its architecture summary
168
+ main_model, main_params = DeepSTARR(params)
169
+ main_model.summary()
170
+ main_model, my_history = train(main_model, X_train, Y_train, X_valid, Y_valid, main_params)
171
+
172
+ endTime=time.time()
173
+ seconds=endTime-startTime
174
+ print("Total training time:",round(seconds/60,2),"minutes")
175
+
176
+ dev_results = summary_statistics(X_test, Y_test[0], "test", "Dev", main_model, main_params, out_dir)
177
+ hk_results = summary_statistics(X_test, Y_test[1], "test", "Hk", main_model, main_params, out_dir)
178
+
179
+ result = {
180
+ "AutoDNA": {
181
+ "means": {
182
+ "PCC(Dev)": dev_results,
183
+ "PCC(Hk)": hk_results
184
+ }
185
+ }
186
+ }
187
+
188
+ with open(f"{out_dir}/final_info.json", "w") as file:
189
+ json.dump(result, file, indent=4)
190
+
191
+ main_model.save(out_dir + '/' + label + '.h5')
192
+
193
+ if __name__ == "__main__":
194
+ try:
195
+ args = parse_arguments()
196
+ main(args.config, args.indir, args.out_dir, args.label)
197
+ except Exception as e:
198
+ print("Original error in subprocess:", flush=True)
199
+ os.makedirs(args.out_dir, exist_ok=True)
200
+ with open(os.path.join(args.out_dir, "traceback.log"), "w") as log_file:
201
+ traceback.print_exc(file=log_file)
202
+ raise
203
+
204
+
205
+
206
+
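
A short usage sketch for the two sequence encoders defined in the script above; it assumes the file is importable as experiment (importing pulls in TensorFlow but triggers no training, since main() is guarded by __main__):

from experiment import one_hot_encode, kmer_features

# one_hot_encode maps a length-L sequence to an (L, 4) matrix with columns
# in A/C/G/T order and an all-zero row for N; the one-hot input layer
# expects L = 249.
print(one_hot_encode("ACGTN").shape)  # (5, 4)

# kmer_features returns a (1, 4**k) frequency matrix; for k = 3 that is
# 64 features, matching Input(shape=(1, 64)) in the k-mer branch.
feats = kmer_features("ACGTACGT", k=3)
print(feats.shape, feats.sum())       # (1, 64), frequencies summing to 1.0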
examples/AutoEAP_UMI-STARR-seq/Baseline/final_info.json ADDED
@@ -0,0 +1,8 @@
1
+ {
2
+ "AutoDNA":{
3
+ "means":{
4
+ "PCC(Dev)": 0.52,
5
+ "PCC(Hk)": 0.65
6
+ }
7
+ }
8
+ }
examples/AutoEAP_UMI-STARR-seq/Baseline/launcher.sh ADDED
@@ -0,0 +1 @@
1
+ python experiment.py --out_dir $1 > $1/train.log 2>&1
examples/AutoEAP_UMI-STARR-seq/HyenaMSTA+/config/config-conv-117.json ADDED
@@ -0,0 +1,22 @@
1
+ {
2
+ "batch_size": 64,
3
+ "encode": "one-hot",
4
+ "epochs": 100,
5
+ "early_stop": 20,
6
+ "lr": 0.001,
7
+ "convolution_layers": {
8
+ "n_layers": 4,
9
+ "filters": [1024, 512, 256, 128],
10
+ "kernel_sizes": [8, 16, 32, 64]
11
+ },
12
+ "transformer_layers": {
13
+ "n_layers": 0,
14
+ "attn_key_dim": [16, 16, 16],
15
+ "attn_heads": [2048, 2048, 2048]
16
+ },
17
+ "n_dense_layer": 1,
18
+ "dense_neurons1": 64,
19
+ "dropout_conv": "yes",
20
+ "dropout_prob": 0.4,
21
+ "pad": "same"
22
+ }
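
This JSON is identical to the baseline's; the HyenaMSTA+-specific knobs (num_motifs, motif_dim, ca_msta_heads, l2_reg) arrive as CLI arguments and are overlaid onto it by LoadConfig in the script below. A minimal sketch of that merge, with a SimpleNamespace standing in for the parsed argparse namespace:

import json
from types import SimpleNamespace

# Stand-in for the namespace returned by parse_arguments().
args = SimpleNamespace(model_type="hyenamsta_plus", num_motifs=48,
                       motif_dim=96, ca_msta_heads=8, l2_reg=1e-6)
with open("config/config-conv-117.json") as fh:
    params = json.load(fh)
params.update(vars(args))  # CLI values extend the shared JSON config
print(params["num_motifs"], params["motif_dim"], params["lr"])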
examples/AutoEAP_UMI-STARR-seq/HyenaMSTA+/experiment.py ADDED
@@ -0,0 +1,241 @@
1
+ # adapted from the DeepSTARR colab notebook: https://colab.research.google.com/drive/1Xgak40TuxWWLh5P5ARf0-4Xo0BcRn0Gd
2
+
3
+ import argparse
4
+ import os
5
+ import sys
6
+ import time
7
+ import traceback
8
+ import sklearn
9
+ import json
10
+ import tensorflow as tf
11
+ import keras
12
+ import keras_nlp
13
+ import keras.layers as kl
14
+ from keras.layers import Conv1D, MaxPooling1D, AveragePooling1D
15
+ from keras_nlp.layers import SinePositionEncoding, TransformerEncoder
16
+ from keras.layers import BatchNormalization
17
+ from keras.models import Sequential, Model, load_model
18
+ from keras.optimizers import Adam
19
+ from keras.callbacks import EarlyStopping, History, ModelCheckpoint
20
+ import pandas as pd
21
+ import numpy as np
22
+ import matplotlib.pyplot as plt
23
+ import seaborn as sns
24
+ from scipy import stats
25
+ from collections import Counter
26
+ from itertools import product
27
+ from sklearn.metrics import mean_squared_error
28
+ from hyenamsta_model import HyenaMSTAPlus
29
+
30
+ startTime=time.time()
31
+
32
+ os.environ["CUDA_VISIBLE_DEVICES"] = "1"
33
+
34
+ def parse_arguments():
35
+ parser = argparse.ArgumentParser(description='DeepSTARR')
36
+ parser.add_argument('--config', type=str, default='config/config-conv-117.json', help='Configuration file path (default: config/config-conv-117.json)')
37
+ parser.add_argument('--indir', type=str, default='./DeepSTARR-Reimplementation-main/data/Sequences_activity_all.txt', help='Input data directory (default: ./DeepSTARR-Reimplementation-main/data/Sequences_activity_all.txt)')
38
+ parser.add_argument('--out_dir', type=str, default='output', help='Output directory (default: output)')
39
+ parser.add_argument('--label', type=str, default='hyenamsta_plus', help='Output label (default: hyenamsta_plus)')
40
+ parser.add_argument('--model_type', type=str, default='hyenamsta_plus', help='Model type to use: "deepstarr" or "hyenamsta_plus" (default: hyenamsta_plus)')
41
+ parser.add_argument('--num_motifs', type=int, default=48, help='Number of motifs for CA-MSTA (default: 48)')
42
+ parser.add_argument('--motif_dim', type=int, default=96, help='Dimension of motif embeddings (default: 96)')
43
+ parser.add_argument('--ca_msta_heads', type=int, default=8, help='Number of attention heads in CA-MSTA (default: 8)')
44
+ parser.add_argument('--l2_reg', type=float, default=1e-6, help='L2 regularization strength (default: 1e-6)')
45
+ return parser.parse_args()
46
+
47
+ def LoadConfig(config, args):
48
+ with open(config, 'r') as file:
49
+ params = json.load(file)
50
+
51
+ # Add HyenaMSTA+ specific parameters
52
+ params['model_type'] = args.model_type
53
+ params['num_motifs'] = args.num_motifs
54
+ params['motif_dim'] = args.motif_dim
55
+ params['ca_msta_heads'] = args.ca_msta_heads
56
+ params['l2_reg'] = args.l2_reg
57
+
58
+ return params
59
+
60
+ def one_hot_encode(seq):
61
+ nucleotide_dict = {'A': [1, 0, 0, 0],
62
+ 'C': [0, 1, 0, 0],
63
+ 'G': [0, 0, 1, 0],
64
+ 'T': [0, 0, 0, 1],
65
+ 'N': [0, 0, 0, 0]}
66
+ return np.array([nucleotide_dict[nuc] for nuc in seq])
67
+
68
+ def kmer_encode(sequence, k=3):
69
+ sequence = sequence.upper()
70
+ kmers = [sequence[i:i+k] for i in range(len(sequence) - k + 1)]
71
+ kmer_counts = Counter(kmers)
72
+ return {kmer: kmer_counts.get(kmer, 0) / len(kmers) for kmer in [''.join(p) for p in product('ACGT', repeat=k)]}
73
+
74
+ def kmer_features(seq, k=3):
75
+ all_kmers = [''.join(p) for p in product('ACGT', repeat=k)]
76
+ feature_matrix = []
77
+ kmer_freqs = kmer_encode(seq, k)
78
+ feature_vector = [kmer_freqs[kmer] for kmer in all_kmers]
79
+ feature_matrix.append(feature_vector)
80
+ return np.array(feature_matrix)
81
+
82
+ def prepare_input(data_set, params):
83
+ if params['encode'] == 'one-hot':
84
+ seq_matrix = np.array(data_set['Sequence'].apply(one_hot_encode).tolist()) # (number of sequences, length of sequences, nucleotides)
85
+ elif params['encode'] == 'k-mer':
86
+ seq_matrix = np.array(data_set['Sequence'].apply(kmer_features, k=3).tolist()) # (number of sequences, 1, 4^k)
87
+ else:
88
+ raise ValueError(f"unknown encoding method: {params['encode']}")
89
+
90
+ Y_dev = data_set.Dev_log2_enrichment
91
+ Y_hk = data_set.Hk_log2_enrichment
92
+ Y = [Y_dev, Y_hk]
93
+
94
+ return seq_matrix, Y
95
+
96
+ def DeepSTARR(params):
97
+ if params['encode'] == 'one-hot':
98
+ input = kl.Input(shape=(249, 4))
99
+ elif params['encode'] == 'k-mer':
100
+ input = kl.Input(shape=(1, 64))
101
+
102
+ for i in range(params['convolution_layers']['n_layers']):
103
+ x = kl.Conv1D(params['convolution_layers']['filters'][i],
104
+ kernel_size = params['convolution_layers']['kernel_sizes'][i],
105
+ padding = params['pad'],
106
+ name=str('Conv1D_'+str(i+1)))(input if i == 0 else x)  # first layer reads the encoded input; later layers stack on x
107
+ x = kl.BatchNormalization()(x)
108
+ x = kl.Activation('relu')(x)
109
+ if params['encode'] == 'one-hot':
110
+ x = kl.MaxPooling1D(2)(x)
111
+
112
+ if params['dropout_conv'] == 'yes': x = kl.Dropout(params['dropout_prob'])(x)
113
+
114
+ # optional attention layers
115
+ for i in range(params['transformer_layers']['n_layers']):
116
+ if i == 0:
117
+ x = x + keras_nlp.layers.SinePositionEncoding()(x)
118
+ x = TransformerEncoder(intermediate_dim = params['transformer_layers']['attn_key_dim'][i],
119
+ num_heads = params['transformer_layers']['attn_heads'][i],
120
+ dropout = params['dropout_prob'])(x)
121
+
122
+ # After the convolutional layers, the output is flattened and passed through a series of fully connected/dense layers
123
+ # Flattening converts the multi-dimensional convolutional output into a one-dimensional array that feeds the fully connected layers
124
+ x = kl.Flatten()(x)
125
+
126
+ # Fully connected layers
127
+ # Each fully connected layer is followed by batch normalization, ReLU activation, and dropout
128
+ for i in range(params['n_dense_layer']):
129
+ x = kl.Dense(params['dense_neurons'+str(i+1)],
130
+ name=str('Dense_'+str(i+1)))(x)
131
+ x = kl.BatchNormalization()(x)
132
+ x = kl.Activation('relu')(x)
133
+ x = kl.Dropout(params['dropout_prob'])(x)
134
+
135
+ # Main model bottleneck
136
+ bottleneck = x
137
+
138
+ # heads per task (developmental and housekeeping enhancer activities)
139
+ # The final output layer is a pair of dense layers, one for each task (developmental and housekeeping enhancer activities), each with a single neuron and a linear activation function
140
+ tasks = ['Dev', 'Hk']
141
+ outputs = []
142
+ for task in tasks:
143
+ outputs.append(kl.Dense(1, activation='linear', name=str('Dense_' + task))(bottleneck))
144
+
145
+ # Build Keras model object
146
+ model = Model([input], outputs)
147
+ model.compile(Adam(learning_rate=params['lr']), # Adam optimizer
148
+ loss=['mse', 'mse'], # loss is Mean Squared Error (MSE)
149
+ loss_weights=[1, 1]) # in case we want to weight the two outputs differently; for now both use the same weight
150
+
151
+ return model, params
152
+
153
+ def train(selected_model, X_train, Y_train, X_valid, Y_valid, params):
154
+ callbacks = [
155
+ EarlyStopping(patience=params['early_stop'], monitor="val_loss", restore_best_weights=True),
156
+ History()
157
+ ]
158
+
159
+ # Add learning rate scheduler if enabled
160
+ if params.get('lr_schedule', False):
161
+ def lr_scheduler(epoch, lr):
162
+ if epoch < 20: # Longer warm-up period
163
+ return lr
164
+ else:
165
+ return lr * tf.math.exp(-0.03) # Gentler decay
166
+
167
+ callbacks.append(tf.keras.callbacks.LearningRateScheduler(lr_scheduler))
168
+
169
+ my_history = selected_model.fit(
170
+ X_train, Y_train,
171
+ validation_data=(X_valid, Y_valid),
172
+ batch_size=params['batch_size'],
173
+ epochs=params['epochs'],
174
+ callbacks=callbacks
175
+ )
176
+
177
+ return selected_model, my_history
178
+
179
+ def summary_statistics(X, Y, set, task, main_model, main_params, out_dir):
180
+ pred = main_model.predict(X, batch_size=main_params['batch_size']) # predict
181
+ if task =="Dev":
182
+ i=0
183
+ if task =="Hk":
184
+ i=1
185
+ print(set + ' MSE ' + task + ' = ' + str("{0:0.2f}".format(mean_squared_error(Y, pred[i].squeeze()))))
186
+ print(set + ' PCC ' + task + ' = ' + str("{0:0.2f}".format(stats.pearsonr(Y, pred[i].squeeze())[0])))
187
+ print(set + ' SCC ' + task + ' = ' + str("{0:0.2f}".format(stats.spearmanr(Y, pred[i].squeeze())[0])))
188
+ return str("{0:0.2f}".format(stats.pearsonr(Y, pred[i].squeeze())[0]))
189
+
190
+ def main(config, indir, out_dir, label, args):
191
+ data = pd.read_table(indir)
192
+ params = LoadConfig(config, args)
193
+
194
+ X_train, Y_train = prepare_input(data[data['set'] == "Train"], params)
195
+ X_valid, Y_valid = prepare_input(data[data['set'] == "Val"], params)
196
+ X_test, Y_test = prepare_input(data[data['set'] == "Test"], params)
197
+
198
+ # Select model based on model_type parameter
199
+ if params['model_type'] == 'deepstarr':
200
+ main_model, main_params = DeepSTARR(params)
201
+ main_model.summary()
202
+ else: # hyenamsta_plus
203
+ main_model, main_params = HyenaMSTAPlus(params)
204
+ main_model.summary()
205
+ main_model, my_history = train(main_model, X_train, Y_train, X_valid, Y_valid, main_params)
206
+
207
+ endTime=time.time()
208
+ seconds=endTime-startTime
209
+ print("Total training time:",round(seconds/60,2),"minutes")
210
+
211
+ dev_results = summary_statistics(X_test, Y_test[0], "test", "Dev", main_model, main_params, out_dir)
212
+ hk_results = summary_statistics(X_test, Y_test[1], "test", "Hk", main_model, main_params, out_dir)
213
+
214
+ result = {
215
+ "AutoDNA": {
216
+ "means": {
217
+ "PCC(Dev)": dev_results,
218
+ "PCC(Hk)": hk_results
219
+ }
220
+ }
221
+ }
222
+
223
+ with open(f"{out_dir}/final_info.json", "w") as file:
224
+ json.dump(result, file, indent=4)
225
+
226
+ main_model.save(out_dir + '/' + label + '.h5')
227
+
228
+ if __name__ == "__main__":
229
+ try:
230
+ args = parse_arguments()
231
+ main(args.config, args.indir, args.out_dir, args.label, args)
232
+ except Exception as e:
233
+ print("Original error in subprocess:", flush=True)
234
+ os.makedirs(args.out_dir, exist_ok=True)
235
+ with open(os.path.join(args.out_dir, "traceback.log"), "w") as log_file:
236
+ traceback.print_exc(file=log_file)
237
+ raise
238
+
239
+
240
+
241
+
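
The optional lr_schedule branch in train() above holds the learning rate flat for a 20-epoch warm-up and then shrinks it by exp(-0.03) (roughly 3%) per epoch. A standalone sketch of the resulting schedule, written in closed form to mirror the callback's step-by-step decay:

import math

def lr_at_epoch(epoch, base_lr=0.001):
    # The first decay is applied at epoch 20, so epochs 0-19 run at the
    # base rate and epoch e >= 20 has accumulated (e - 19) decay steps.
    decay_steps = max(0, epoch - 19)
    return base_lr * math.exp(-0.03 * decay_steps)

for epoch in (0, 19, 20, 50, 99):
    print(epoch, round(lr_at_epoch(epoch), 7))
# By epoch 99 the rate has fallen to about 0.001 * exp(-2.4), i.e. ~9.1e-5.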
examples/AutoEAP_UMI-STARR-seq/HyenaMSTA+/hyenamsta_model.py ADDED
@@ -0,0 +1,358 @@
1
+ import tensorflow as tf
2
+ import keras
3
+ import keras.layers as kl
4
+ from keras_nlp.layers import SinePositionEncoding, TransformerEncoder
5
+
6
+ class EnhancedHyenaPlusLayer(kl.Layer):
7
+ """
8
+ Enhanced Hyena+DNA layer with multi-scale feature extraction, residual connections,
9
+ explicit dimension alignment, and layer normalization for improved gradient flow and stability.
10
+ """
11
+ def __init__(self, filters, kernel_size, output_dim, use_residual=True, dilation_rate=1,
12
+ kernel_regularizer=None, **kwargs):
13
+ super(EnhancedHyenaPlusLayer, self).__init__(**kwargs)
14
+ self.filters = filters
15
+ self.kernel_size = kernel_size
16
+ self.output_dim = output_dim
17
+ self.use_residual = use_residual
18
+ self.dilation_rate = dilation_rate
19
+ self.kernel_regularizer = kernel_regularizer
20
+
21
+ # Core convolution for long-range dependencies with mild regularization
22
+ self.conv = kl.Conv1D(filters, kernel_size, padding='same',
23
+ kernel_regularizer=kernel_regularizer)
24
+
25
+ # Multi-scale feature extraction with dilated convolutions
26
+ self.dilated_conv = kl.Conv1D(filters // 2, kernel_size,
27
+ padding='same',
28
+ dilation_rate=dilation_rate,
29
+ kernel_regularizer=kernel_regularizer)
30
+
31
+ # Parallel small kernel convolution for local features
32
+ self.local_conv = kl.Conv1D(filters // 2, 3, padding='same',
33
+ kernel_regularizer=kernel_regularizer)
34
+
35
+ # Batch normalization and activation
36
+ self.batch_norm = kl.BatchNormalization()
37
+ self.activation = kl.Activation('relu')
38
+
39
+ # Feature fusion layer
40
+ self.fusion = kl.Dense(filters, kernel_regularizer=kernel_regularizer)
41
+
42
+ # Explicit dimension alignment projection with regularization
43
+ self.projection = kl.Dense(output_dim, kernel_regularizer=kernel_regularizer)
44
+
45
+ # Layer normalization for stability
46
+ self.layer_norm = kl.LayerNormalization()
47
+
48
+ # Input projection for residual connection if dimensions don't match
49
+ self.input_projection = None
50
+ if use_residual:
51
+ self.input_projection = kl.Dense(output_dim, kernel_regularizer=kernel_regularizer)
52
+
53
+ def call(self, inputs, training=None):
54
+ # Save input for residual connection
55
+ residual = inputs
56
+
57
+ # Process through main convolution
58
+ x_main = self.conv(inputs)
59
+
60
+ # Process through dilated convolution for capturing long-range patterns
61
+ x_dilated = self.dilated_conv(inputs)
62
+
63
+ # Process through local convolution for capturing local patterns
64
+ x_local = self.local_conv(inputs)
65
+
66
+ # Concatenate multi-scale features
67
+ x_multi = tf.concat([x_dilated, x_local], axis=-1)
68
+
69
+ # Fuse features
70
+ x = self.fusion(x_multi) + x_main
71
+
72
+ x = self.batch_norm(x, training=training)
73
+ x = self.activation(x)
74
+
75
+ # Project to target dimension
76
+ x = self.projection(x)
77
+
78
+ # Add residual connection if enabled
79
+ if self.use_residual:
80
+ # Project input if needed for dimension matching
81
+ residual = self.input_projection(residual)
82
+ x = x + residual
83
+
84
+ # Apply layer normalization
85
+ x = self.layer_norm(x)
86
+
87
+ return x
88
+
89
+ def get_config(self):
90
+ config = super(EnhancedHyenaPlusLayer, self).get_config()
91
+ config.update({
92
+ 'filters': self.filters,
93
+ 'kernel_size': self.kernel_size,
94
+ 'output_dim': self.output_dim,
95
+ 'use_residual': self.use_residual,
96
+ 'dilation_rate': self.dilation_rate,
97
+ 'kernel_regularizer': self.kernel_regularizer
98
+ })
99
+ return config
100
+
101
+ class HybridContextAwareMSTA(kl.Layer):
102
+ """
103
+ Hybrid Context-Aware Motif-Specific Transformer Attention (HCA-MSTA) module
104
+ with enhanced biological interpretability and selective motif attention.
105
+ Combines the strengths of previous approaches with improved positional encoding.
106
+ """
107
+ def __init__(self, num_motifs, motif_dim, num_heads=4, dropout_rate=0.1,
108
+ kernel_regularizer=None, activity_regularizer=None, **kwargs):
109
+ super(HybridContextAwareMSTA, self).__init__(**kwargs)
110
+ self.num_motifs = num_motifs
111
+ self.motif_dim = motif_dim
112
+ self.num_heads = num_heads
113
+ self.dropout_rate = dropout_rate
114
+ self.kernel_regularizer = kernel_regularizer
115
+ self.activity_regularizer = activity_regularizer
116
+
117
+ # Motif embeddings with mild regularization
118
+ self.motif_embeddings = self.add_weight(
119
+ shape=(num_motifs, motif_dim),
120
+ initializer='glorot_uniform',
121
+ regularizer=activity_regularizer,
122
+ trainable=True,
123
+ name='motif_embeddings'
124
+ )
125
+
126
+ # Positional encoding for motifs
127
+ self.motif_position_encoding = self.add_weight(
128
+ shape=(num_motifs, motif_dim),
129
+ initializer='glorot_uniform',
130
+ trainable=True,
131
+ name='motif_position_encoding'
132
+ )
133
+
134
+ # Biological prior weights for motifs (importance weights)
135
+ self.motif_importance = self.add_weight(
136
+ shape=(num_motifs, 1),
137
+ initializer='ones',
138
+ regularizer=activity_regularizer,
139
+ trainable=True,
140
+ name='motif_importance'
141
+ )
142
+
143
+ # Attention mechanism components with regularization
144
+ self.query_dense = kl.Dense(motif_dim, kernel_regularizer=kernel_regularizer)
145
+ self.key_dense = kl.Dense(motif_dim, kernel_regularizer=kernel_regularizer)
146
+ self.value_dense = kl.Dense(motif_dim, kernel_regularizer=kernel_regularizer)
147
+
148
+ # Multi-head attention
149
+ self.attention = kl.MultiHeadAttention(
150
+ num_heads=num_heads,
151
+ key_dim=motif_dim // num_heads,
152
+ dropout=dropout_rate
153
+ )
154
+
155
+ # Gating mechanism
156
+ self.gate_dense = kl.Dense(motif_dim, activation='sigmoid',
157
+ kernel_regularizer=kernel_regularizer)
158
+
159
+ # Output projection
160
+ self.output_dense = kl.Dense(motif_dim, kernel_regularizer=kernel_regularizer)
161
+ self.dropout = kl.Dropout(dropout_rate)
162
+ self.layer_norm = kl.LayerNormalization()
163
+
164
+ # Feed-forward network for feature enhancement
165
+ self.ffn_dense1 = kl.Dense(motif_dim * 2, activation='relu',
166
+ kernel_regularizer=kernel_regularizer)
167
+ self.ffn_dense2 = kl.Dense(motif_dim, kernel_regularizer=kernel_regularizer)
168
+ self.ffn_layer_norm = kl.LayerNormalization()
169
+ self.ffn_dropout = kl.Dropout(dropout_rate)
170
+
171
+ def positional_masking(self, sequence_embeddings, motif_embeddings):
172
+ """
173
+ Generate hybrid positional masking based on sequence and motif relevance
174
+ with improved biological context awareness and motif importance weighting.
175
+ Combines inverse distance and Gaussian approaches for better biological relevance.
176
+ """
177
+ # Calculate similarity between sequence embeddings and motif embeddings
178
+ similarity = tf.matmul(sequence_embeddings, tf.transpose(motif_embeddings, [0, 2, 1]))
179
+
180
+ # Scale similarity scores for numerical stability
181
+ scaled_similarity = similarity / tf.sqrt(tf.cast(self.motif_dim, tf.float32))
182
+
183
+ # Apply softmax to get attention-like weights
184
+ attention_weights = tf.nn.softmax(scaled_similarity, axis=-1)
185
+
186
+ # Calculate position-aware weights with hybrid approach
187
+ seq_length = tf.shape(sequence_embeddings)[1]
188
+ motif_length = tf.shape(motif_embeddings)[1]
189
+
190
+ # Create position indices
191
+ position_indices = tf.range(seq_length)[:, tf.newaxis] - tf.range(motif_length)[tf.newaxis, :]
192
+ position_indices_float = tf.cast(position_indices, tf.float32)
193
+
194
+ # Inverse distance weighting (for local context)
195
+ inverse_weights = 1.0 / (1.0 + tf.abs(position_indices_float))
196
+
197
+ # Gaussian weighting (for smooth transitions)
198
+ gaussian_weights = tf.exp(-0.5 * tf.square(position_indices_float / 8.0)) # Gaussian with σ=8
199
+
200
+ # Combine both weighting schemes for a hybrid approach
201
+ # This captures both sharp local context and smooth transitions
202
+ position_weights = 0.5 * inverse_weights + 0.5 * gaussian_weights
203
+ position_weights = tf.expand_dims(position_weights, 0) # Add batch dimension
204
+
205
+ # Apply motif importance weighting with temperature scaling for sharper focus
206
+ motif_weights = tf.nn.softmax(self.motif_importance * 1.5, axis=0) # Temperature scaling
207
+ motif_weights = tf.expand_dims(tf.expand_dims(motif_weights, 0), 1) # [1, 1, num_motifs, 1]
208
+
209
+ # Combine attention weights with position weights and motif importance
210
+ combined_weights = attention_weights * position_weights * tf.squeeze(motif_weights, -1)
211
+
212
+ return combined_weights
213
+
214
+ def call(self, inputs, training=None):
215
+ # Add positional encoding to motif embeddings
216
+ batch_size = tf.shape(inputs)[0]
217
+
218
+ # Expand motif embeddings and position encodings to batch dimension
219
+ motifs = tf.tile(tf.expand_dims(self.motif_embeddings, 0), [batch_size, 1, 1])
220
+ pos_encoding = tf.tile(tf.expand_dims(self.motif_position_encoding, 0), [batch_size, 1, 1])
221
+
222
+ # Add positional encoding to motifs
223
+ motifs_with_pos = motifs + pos_encoding
224
+
225
+ # Prepare query from input sequence embeddings
226
+ query = self.query_dense(inputs)
227
+
228
+ # Prepare key and value from motifs with positional encoding
229
+ key = self.key_dense(motifs_with_pos)
230
+ value = self.value_dense(motifs_with_pos)
231
+
232
+ # Generate positional masking
233
+ pos_mask = self.positional_masking(query, motifs_with_pos)
234
+
235
+ # Apply attention with positional masking
236
+ attention_output = self.attention(
237
+ query=query,
238
+ key=key,
239
+ value=value,
240
+ attention_mask=pos_mask,
241
+ training=training
242
+ )
243
+
+         # Apply gating mechanism to selectively focus on relevant features
+         gate = self.gate_dense(inputs)
+         gated_attention = gate * attention_output
+
+         # Process through output projection with residual connection
+         output = self.output_dense(gated_attention)
+         output = self.dropout(output, training=training)
+         output = self.layer_norm(output + inputs)  # Residual connection
+
+         # Apply feed-forward network with residual connection
+         ffn_output = self.ffn_dense1(output)
+         ffn_output = self.ffn_dense2(ffn_output)
+         ffn_output = self.ffn_dropout(ffn_output, training=training)
+         final_output = self.ffn_layer_norm(output + ffn_output)  # Residual connection
+
+         return final_output
+
+     def get_config(self):
+         config = super(HybridContextAwareMSTA, self).get_config()
+         config.update({
+             'num_motifs': self.num_motifs,
+             'motif_dim': self.motif_dim,
+             'num_heads': self.num_heads,
+             'dropout_rate': self.dropout_rate,
+             # Serialize the regularizers so the config stays JSON-compatible
+             'kernel_regularizer': tf.keras.regularizers.serialize(self.kernel_regularizer),
+             'activity_regularizer': tf.keras.regularizers.serialize(self.activity_regularizer)
+         })
+         return config
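+
+     @classmethod
+     def from_config(cls, config):
+         # Sketch of the inverse of get_config, assuming the regularizers were
+         # stored in serialized form above; deserialize(None) returns None.
+         config['kernel_regularizer'] = tf.keras.regularizers.deserialize(config.get('kernel_regularizer'))
+         config['activity_regularizer'] = tf.keras.regularizers.deserialize(config.get('activity_regularizer'))
+         return cls(**config)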
+
+
+ def HyenaMSTAPlus(params):
+     """
+     Enhanced HyenaMSTA+ model for enhancer activity prediction with multi-scale
+     feature extraction, a hybrid attention mechanism, and improved biological
+     context modeling.
+     """
+     if params['encode'] == 'one-hot':
+         input_layer = kl.Input(shape=(249, 4))
+     elif params['encode'] == 'k-mer':
+         input_layer = kl.Input(shape=(1, 64))
+     else:
+         raise ValueError(f"Unsupported encoding: {params['encode']}")
+
+     # Regularization settings - milder than in the previous run
+     l2_reg = params.get('l2_reg', 1e-6)
+     kernel_regularizer = tf.keras.regularizers.l2(l2_reg)
+     activity_regularizer = tf.keras.regularizers.l1(l2_reg / 20)
+
+     # Hyena+DNA processing
+     x = input_layer
+     hyena_layers = []
+
+     # Number of motifs and embedding dimension, tuned in previous runs
+     num_motifs = params.get('num_motifs', 48)  # Optimal value from Run 2
+     motif_dim = params.get('motif_dim', 96)    # Optimal value from Run 2
+
+     # Apply Enhanced Hyena+DNA layers with increasing dilation rates
+     for i in range(params['convolution_layers']['n_layers']):
+         # Increasing dilation rates give a broader receptive field
+         dilation_rate = 2 ** min(i, 2)  # 1, 2, 4 (capped to avoid excessive sparsity)
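+         # A dilated convolution with kernel size k and dilation d spans
+         # d*(k-1)+1 positions; for example, k = 7 covers 7, 13, and 25
+         # positions at d = 1, 2, 4, widening the receptive field with no
+         # extra parameters.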
+
+         hyena_layer = EnhancedHyenaPlusLayer(
+             filters=params['convolution_layers']['filters'][i],
+             kernel_size=params['convolution_layers']['kernel_sizes'][i],
+             output_dim=motif_dim,
+             dilation_rate=dilation_rate,
+             kernel_regularizer=kernel_regularizer,
+             name=f'EnhancedHyenaPlus_{i+1}'
+         )
+         x = hyena_layer(x)
+         hyena_layers.append(x)
+
+         if params['encode'] == 'one-hot':
+             x = kl.MaxPooling1D(2)(x)
+
+         if params['dropout_conv'] == 'yes':
+             x = kl.Dropout(params['dropout_prob'])(x)
+
+     # Hybrid Context-Aware MSTA processing
+     ca_msta = HybridContextAwareMSTA(
+         num_motifs=num_motifs,
+         motif_dim=motif_dim,
+         num_heads=params.get('ca_msta_heads', 8),
+         dropout_rate=params['dropout_prob'],
+         kernel_regularizer=kernel_regularizer,
+         activity_regularizer=activity_regularizer
+     )
+     x = ca_msta(x)
+
+     # Flatten, then fully connected layers
+     x = kl.Flatten()(x)
+     for i in range(params['n_dense_layer']):
+         x = kl.Dense(params['dense_neurons' + str(i + 1)],
+                      name=f'Dense_{i+1}')(x)
+         x = kl.BatchNormalization()(x)
+         x = kl.Activation('relu')(x)
+         x = kl.Dropout(params['dropout_prob'])(x)
+
+     # Main model bottleneck
+     bottleneck = x
+
+     # One head per task (developmental and housekeeping enhancer activities)
+     tasks = ['Dev', 'Hk']
+     outputs = []
+     for task in tasks:
+         outputs.append(kl.Dense(1, activation='linear', name=f'Dense_{task}')(bottleneck))
+
+     # Build and compile the Keras model
+     model = keras.models.Model([input_layer], outputs)
+     model.compile(
+         keras.optimizers.Adam(learning_rate=params['lr']),
+         loss=['mse', 'mse'],
+         loss_weights=[1, 1]
+     )
+
+     return model, params
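
For orientation, a minimal sketch of how this constructor is driven; the params dict below is hypothetical (the key names follow the usages above, but the values are illustrative and not taken from the committed config):

    params = {
        'encode': 'one-hot',
        'convolution_layers': {'n_layers': 3,
                               'filters': [128, 60, 60],
                               'kernel_sizes': [7, 3, 3]},
        'dropout_conv': 'yes',
        'dropout_prob': 0.4,
        'n_dense_layer': 2,
        'dense_neurons1': 256,
        'dense_neurons2': 256,
        'lr': 2e-3,
    }
    model, params = HyenaMSTAPlus(params)
    model.summary()  # two linear heads: Dense_Dev and Dense_Hk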
examples/AutoEAP_UMI-STARR-seq/HyenaMSTA+/idea.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "name": "HyenaMSTA+",
+   "title": "Enhanced Hybrid Genomic Enhancer Activity Model with Context-Aware Hyena+DNA and Improved Biological-Motif Transformer Attention",
+   "description": "The refined model, HyenaMSTA+, introduces two major enhancements to its architecture for predicting enhancer activity from DNA sequences. First, it improves the contextual modeling of genomic sequences by employing a modified version of HyenaDNA, termed Hyena+DNA, which includes explicit embedding dimensional alignment and layer-wise normalization for robust downstream processing. Second, the Motif-Specific Transformer Attention (MSTA) module is augmented with a context-aware soft-attention mechanism that explicitly incorporates positionally-aware motif embeddings, thus improving its biological interpretability and attention clarity. These improvements directly address critiques related to the theoretical formulation, reproducibility, and implementation feasibility of the hybrid model, while leveraging insights from the reviewed literature.",
+   "statement": "The novelty of HyenaMSTA+ lies in the integration of two advancements: (1) Hyena+DNA, a contextually fortified version of HyenaDNA, which explicitly aligns embedding dimensions and introduces layer-wise normalization for smoother transitions to downstream modules; and (2) the biologically-informed Context-Aware Motif-Specific Transformer Attention (CA-MSTA), which extends the Transformer attention mechanism with positional encoding of motif regions, ensuring biologically interpretable and context-sensitive regulatory motif identification. These advancements bridge critical gaps in genomic sequence modeling by synthesizing efficient long-range dependency capturing with motif-specific attention mechanisms optimized for developmental and housekeeping enhancer activity prediction.",
+   "method": "### System Architecture Overview\nThe HyenaMSTA+ model predicts enhancer activities by processing DNA sequences through two core components:\n1. **Hyena+DNA:** A modified variant of the HyenaDNA architecture designed for enhanced contextual modeling.\n2. **Context-Aware Motif-Specific Transformer Attention (CA-MSTA):** A biologically-informed Transformer extension tailored for genomic tasks.\n\n### Key Refinements\n#### 1. Hyena+DNA\nThe Hyena+DNA component builds on the original HyenaDNA model with two critical modifications:\n- **Explicit Dimension Alignment**: Explicit projection layers ensure that the embedding dimension \\(d\\) of Hyena+DNA's outputs precisely matches the input dimensions expected by CA-MSTA. This projection is defined as:\n\\[\n\\mathbf{h}'_{\\text{Hyena}} = \\text{Projection}(\\mathbf{h}_{\\text{Hyena}}; \\mathbf{W}_{P}) = \\mathbf{h}_{\\text{Hyena}} \\mathbf{W}_{P}, \\quad \\mathbf{W}_{P} \\in \\mathbb{R}^{d_{\\text{Hyena}} \\times d}\\]\nwhere \\( \\mathbf{h}_{\\text{Hyena}} \\) is the original HyenaDNA output, and \\( \\mathbf{W}_{P} \\) is a trainable projection matrix.\n\n- **Layer-Wise Normalization:** To improve numerical stability and compatibility with downstream modules, layer normalization is applied to the embeddings across all Hyena+DNA layers:\n\\[\n\\mathbf{h}_{\\text{Norm}}^{(l)} = \\text{LayerNorm}(\\mathbf{h}^{(l)}_{\\text{Hyena}}), \\quad l = 1, 2, \\dots, L_{\\text{Hyena}}.\\]\n\n#### 2. Context-Aware Motif-Specific Transformer Attention (CA-MSTA)\nThe CA-MSTA module refines the motif-specific Transformer attention by incorporating positional encoding of motif regions and dynamic contextual weighting of motifs:\n- **Positional Encodings for Motif Embeddings:** Given \\( \\mathbf{m} \\in \\mathbb{R}^{M \\times d}\\) (motif embeddings), a learned positional encoding \\( \\mathbf{P}_{\\text{motifs}} \\in \\mathbb{R}^{M \\times d} \\) is added to represent spatial relevance:\n\\[\n\\mathbf{m}' = \\mathbf{m} + \\mathbf{P}_{\\text{motifs}}.\n\\]\n\n- **Contextual Attention Scores:** The attention mechanism in CA-MSTA now dynamically incorporates sequence context, weighted by positional motif interactions:\n\\[\n\\mathbf{A} = \\text{softmax}\\left( \\frac{\\mathbf{h}'_{\\text{Hyena}} \\mathbf{W}_{Q} \\left( \\mathbf{m}' \\mathbf{W}_{K} \\right)^T + \\mathbf{p}}{\\sqrt{d}} \\right), \\quad \\mathbf{p} = \\text{PositionalMasking}(\\mathbf{h}'_{\\text{Hyena}}, \\mathbf{m}').\\]\nHere, \\( \\mathbf{W}_{Q}, \\mathbf{W}_{K}, \\mathbf{W}_{V} \\) are trainable weight matrices, and \\( \\mathbf{p} \\) adjusts attention weights dynamically based on motif relevance.\n\n- **Final Contextual Aggregation:** Contextualized embeddings \\( \\mathbf{h}_{\\text{CA-MSTA}} \\) are computed as:\n\\[\n\\mathbf{h}_{\\text{CA-MSTA}} = \\mathbf{A}(\\mathbf{m}' \\mathbf{W}_{V}).\n\\]\n\n#### 3. Prediction Module\nThe aggregated embeddings from CA-MSTA are flattened and passed through separate dense layers for developmental and housekeeping enhancer predictions:\n\\[\n\\hat{y}_{\\text{dev}} = \\text{Dense}(\\text{Flatten}(\\mathbf{h}_{\\text{CA-MSTA}})), \\quad \\hat{y}_{\\text{hk}} = \\text{Dense}(\\text{Flatten}(\\mathbf{h}_{\\text{CA-MSTA}})).\n\\]\n\n### Enhanced Pseudocode\n```plaintext\nInput: DNA sequence \\( \\mathbf{x} \\), parameters \\( \\theta_{\\text{Hyena+DNA}}, \\theta_{\\text{CA-MSTA}}, \\theta_{\\text{Dense}} \\).\nOutput: Enhancer activities \\( \\hat{y}_{\\text{dev}}, \\hat{y}_{\\text{hk}} \\).\n\n1. Encode sequence: \\( \\mathbf{x} \\leftarrow \\text{OneHot} ( \\mathbf{x} ) \\).\n2. Hyena+DNA Processing:\n a. Capture long-range interactions: \\( \\mathbf{h}_{\\text{Hyena}} \\leftarrow f_{\\text{HyenaDNA}}(\\mathbf{x}). \\)\n b. Project to match downstream dimension: \\( \\mathbf{h}'_{\\text{Hyena}} \\leftarrow \\text{Projection}(\\mathbf{h}_{\\text{Hyena}}). \\)\n c. Aggregate normalized layers: \\( \\mathbf{h}_{\\text{Norm}} \\leftarrow \\text{LayerNorm}(\\mathbf{h}'_{\\text{Hyena}}). \\)\n3. CA-MSTA Processing:\n a. Add positional encoding to motifs: \\( \\mathbf{m}' \\leftarrow \\mathbf{m} + \\mathbf{P}_{\\text{motifs}}. \\)\n b. Compute context-aware attention: \\( \\mathbf{A} \\leftarrow \\text{Softmax}(\\text{Score}). \\)\n c. Aggregate context: \\( \\mathbf{h}_{\\text{CA-MSTA}} \\leftarrow \\mathbf{A}(\\mathbf{m}' \\mathbf{W}_{V}). \\)\n4. Predict enhancer activities:\n a. Developmental enhancer: \\( \\hat{y}_{\\text{dev}} \\leftarrow \\text{Dense}(\\text{Flatten}(\\mathbf{h}_{\\text{CA-MSTA}})). \\)\n b. Housekeeping enhancer: \\( \\hat{y}_{\\text{hk}} \\leftarrow \\text{Dense}(\\text{Flatten}(\\mathbf{h}_{\\text{CA-MSTA}})). \\).\n```\n\n### Addressed Critiques\n- **Mathematical Formulation (Critique 1):** Dimensions, normalization steps, and projection layers are explicitly defined to ensure seamless integration.\n- **Reproducibility (Critique 9):** Detailed parameter initialization and module flow ensure end-to-end implementation feasibility.\n- **Biological Interpretability (Critique 8):** Motif embedding updates with positional context improve interpretability and align with genomic relevance research.\n\n### Theoretical Contributions\n1. Enhanced stability and efficiency for long-range genomic modeling by improving Hyena+DNA with layer normalization and explicit embedding projection.\n2. Improved biological plausibility and fine-tuning flexibility with the addition of positional encodings in motif-specific Transformer attention mechanisms, boosting scientific insights on enhancer activity prediction."
+ }
examples/AutoEAP_UMI-STARR-seq/HyenaMSTA+/launcher.sh ADDED
@@ -0,0 +1 @@
+ python experiment.py --out_dir $1 > $1/train.log 2>&1
examples/AutoEAP_UMI-STARR-seq/HyenaMSTA+/res/final_info.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "AutoDNA": {
+     "means": {
+       "PCC(Dev)": "0.71",
+       "PCC(Hk)": "0.79"
+     }
+   }
+ }
examples/AutoEAP_UMI-STARR-seq/HyenaMSTA+/res/hyenamsta_plus.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fe47b799611fea36cddef48e669a7568e981c0098a7c3cc46e4ca43d3da422e1
+ size 67015544