positions/trainNN.cxx
/*
  Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
*/

#include <TTree.h>
#include <TFile.h>
#include <TCanvas.h>
#include "TChain.h"
#include <TH1F.h>
#include <TLegend.h>
#include <TPad.h>
#include "TROOT.h" // for gROOT, used below to set the plain style

#include "../TJetNet.h"
#include "../doNormalization.C"
#include "Riostream.h"
#include "TSystem.h"

#include "../TTrainedNetwork.h"

#include "TMatrixD.h"
#include "TVectorD.h"
#include "trainNN.h"

#include <vector>
#include <utility>
#include <algorithm>
#include <string>
#include <cmath>
#include <iostream>
#include <stdexcept>

using namespace std;

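// Logistic activation with slope 2: maps any real x into (0,1);
// algebraically identical to (1+tanh(x))/2.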
Double_t sigmoid(Double_t x)
{
  return 1./(1.+exp(-2*x));
}

bool isBadCluster( int sizeX, int nParticles ){

  if(sizeX==-100){return true;}
  if(nParticles==0){return true;}
  if(nParticles!=1 && nParticles!=2 && nParticles!=3){return true;}

  return false;
}

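// Optionally thin out one-particle clusters, which dominate the sample:
// only two clusters out of every dilutionFactor*k are kept.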
bool skipSingle(int nParticles,int iClus, int dilutionFactor){

  // this reduces the number of single particle clusters by a factor k
  int k = 60;

  if (nParticles==1)
  {
    if ((iClus %(dilutionFactor*k) != 0) && (iClus % (dilutionFactor*k) != 1))
    {
      return true;
    }
  }

  return false;
}

// When track estimates are used, theta==0 flags a cluster with missing
// or invalid track information.
bool badTrackInfo(bool useTrackEstimate, double theta){

  if(useTrackEstimate && theta==0){return true;}
  return false;
}

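// NOTE: only the first seven arguments are passed here; the remaining
// parameters of trainNN are assumed to take the default values declared
// in trainNN.h.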
int main()
{
  trainNN(
      "../reduceddatasets/reduceddataset_Cone4H1TopoParticleJets_forNN.root",
      "dummy",
      10000,
      200,
      10,
      10,
      0);
  return 0;
}
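
// A hypothetical fully-explicit invocation, with illustrative (not
// validated) values for the trailing parameters:
//
//   trainNN("../reduceddatasets/reduceddataset_Cone4H1TopoParticleJets_forNN.root",
//           "dummy",
//           /*nIterations*/ 10000, /*dilutionFactor*/ 200,
//           /*nodesFirstLayer*/ 10, /*nodesSecondLayer*/ 10,
//           /*restartTrainingFrom*/ 0,
//           /*nParticlesTraining*/ 2, /*useTrackEstimate*/ false,
//           /*nPatternsPerUpdate*/ 200, /*learningRate*/ 0.5,
//           /*learningRateDecrease*/ 0.99, /*learningRateMomentum*/ 0.2);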

void trainNN(TString inputfile,
             TString outputclass,
             int nIterations,
             int dilutionFactor,
             int nodesFirstLayer,
             int nodesSecondLayer,
             int restartTrainingFrom,
             int nParticlesTraining,
             bool useTrackEstimate,
             int nPatternsPerUpdate,
             double learningRate,
             double learningRateDecrease,
             double learningRateMomentum) {

  double bweight=1;
  double cweight=1.;
  double lweight=1;

  gROOT->SetStyle("Plain");

  cout << "starting with settings: " << endl;
  cout << " nIterations: " << nIterations << endl;
  cout << " dilutionFactor: " << dilutionFactor << endl;
  cout << " nodesFirstLayer: " << nodesFirstLayer << endl;
  cout << " nodesSecondLayer: " << nodesSecondLayer << endl;

  // TFile *file = TFile::Open(inputfile);
  // TTree *simu = (TTree*)file->Get("Validation/NNinput");

  TChain *myChain = new TChain("Validation/NNinput");

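  // files.txt / filesOnTrack.txt are expected to consist of
  // myChain->Add("...") statements; textually #including them here is how
  // the input file list is attached to the chain (an assumption based on
  // how myChain is used).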
  if(!useTrackEstimate){
    #include "../files.txt"
  }

  if(useTrackEstimate){
    #include "../filesOnTrack.txt"
  }

  TChain* simu=myChain;

  std::cout << " Training sample obtained... " << std::endl;

  vector<int> *NN_sizeX;
  vector<int> *NN_sizeY;
  vector<vector<float> > *NN_matrixOfToT;
  vector<vector<float> > *NN_vectorOfPitchesY;
  vector<int> *NN_ClusterPixLayer;
  vector<int> *NN_ClusterPixBarrelEC;
  vector<float> *NN_phiBS;
  vector<float> *NN_thetaBS;
  vector<float> *NN_etaModule;
  vector<bool> *NN_useTrackInfo;
  vector<int> *NN_columnWeightedPosition;
  vector<int> *NN_rowWeightedPosition;
  vector<vector<float> > *NN_positionX;
  vector<vector<float> > *NN_positionY;
  vector<vector<float> > *NN_position_idX;
  vector<vector<float> > *NN_position_idY;
  vector<vector<float> > *NN_theta;
  vector<vector<float> > *NN_phi;

  // List of branches
  TBranch *b_NN_sizeX;
  TBranch *b_NN_sizeY;
  TBranch *b_NN_matrixOfToT;
  TBranch *b_NN_vectorOfPitchesY;
  TBranch *b_NN_ClusterPixLayer;
  TBranch *b_NN_ClusterPixBarrelEC;
  TBranch *b_NN_phiBS;
  TBranch *b_NN_thetaBS;
  TBranch *b_NN_etaModule;
  TBranch *b_NN_useTrackInfo;
  TBranch *b_NN_columnWeightedPosition;
  TBranch *b_NN_rowWeightedPosition;
  TBranch *b_NN_positionX;
  TBranch *b_NN_positionY;
  TBranch *b_NN_position_idX;
  TBranch *b_NN_position_idY;
  TBranch *b_NN_theta;
  TBranch *b_NN_phi;

  NN_sizeX = 0;
  NN_sizeY = 0;
  NN_matrixOfToT = 0;
  NN_vectorOfPitchesY = 0;
  NN_ClusterPixLayer = 0;
  NN_ClusterPixBarrelEC = 0;
  NN_phiBS = 0;
  NN_thetaBS = 0;
  NN_etaModule = 0;
  NN_useTrackInfo = 0;
  NN_columnWeightedPosition = 0;
  NN_rowWeightedPosition = 0;
  NN_positionX = 0;
  NN_positionY = 0;
  NN_position_idX = 0;
  NN_position_idY = 0;
  NN_theta = 0;
  NN_phi = 0;
  // Set branch addresses and branch pointers
  // if (!tree) return 0;
  // TTree* simu = tree;
  // fCurrent = -1;
  simu->SetMakeClass(1);

  simu->SetBranchAddress("NN_sizeX", &NN_sizeX, &b_NN_sizeX);
  simu->SetBranchAddress("NN_sizeY", &NN_sizeY, &b_NN_sizeY);
  simu->SetBranchAddress("NN_matrixOfToT", &NN_matrixOfToT, &b_NN_matrixOfToT);
  simu->SetBranchAddress("NN_vectorOfPitchesY", &NN_vectorOfPitchesY, &b_NN_vectorOfPitchesY);
  simu->SetBranchAddress("NN_ClusterPixLayer", &NN_ClusterPixLayer, &b_NN_ClusterPixLayer);
  simu->SetBranchAddress("NN_ClusterPixBarrelEC", &NN_ClusterPixBarrelEC, &b_NN_ClusterPixBarrelEC);
  simu->SetBranchAddress("NN_phiBS", &NN_phiBS, &b_NN_phiBS);
  simu->SetBranchAddress("NN_thetaBS", &NN_thetaBS, &b_NN_thetaBS);
  simu->SetBranchAddress("NN_etaModule", &NN_etaModule, &b_NN_etaModule);
  simu->SetBranchAddress("NN_useTrackInfo", &NN_useTrackInfo, &b_NN_useTrackInfo);
  simu->SetBranchAddress("NN_columnWeightedPosition", &NN_columnWeightedPosition, &b_NN_columnWeightedPosition);
  simu->SetBranchAddress("NN_rowWeightedPosition", &NN_rowWeightedPosition, &b_NN_rowWeightedPosition);
  simu->SetBranchAddress("NN_positionX", &NN_positionX, &b_NN_positionX);
  simu->SetBranchAddress("NN_positionY", &NN_positionY, &b_NN_positionY);
  simu->SetBranchAddress("NN_position_idX", &NN_position_idX, &b_NN_position_idX);
  simu->SetBranchAddress("NN_position_idY", &NN_position_idY, &b_NN_position_idY);
  simu->SetBranchAddress("NN_theta", &NN_theta, &b_NN_theta);
  simu->SetBranchAddress("NN_phi", &NN_phi, &b_NN_phi);

  cout << "Branches set..." << endl;

  TString filterTrain("Entry$%");
  filterTrain+=dilutionFactor;
  filterTrain+="==0";

  TString filterTest("Entry$%");
  filterTest+=dilutionFactor;
  filterTest+="==1";

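  // Note: filterTrain/filterTest are TTree selection strings of the form
  // "Entry$%<dilutionFactor>==0/1", but they are not used below; the
  // train/test split is instead done per cluster via iClus%dilutionFactor.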
  int* nneurons;
  int nlayer=3;

  simu->GetEntry(0);

  cout << "First entry..." << endl;

  Int_t sizeX=-7;
  Int_t sizeY=-7;

  // loop over the clusters looking for the first cluster properly saved
  for( unsigned int clus =0; clus<NN_sizeX->size(); clus++ ){

    sizeX = (*NN_sizeX)[clus];
    sizeY = (*NN_sizeY)[clus];

    if(sizeX>0)break;
  }

  cout << "Size obtained" << endl;

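  // Input layout: sizeX*sizeY ToT cells + sizeY pitches + 2 (layer number,
  // barrel/endcap flag) + 2 track angles (phi, theta) = sizeX*(sizeY+1)+4
  // inputs with a track estimate; without it the angles are replaced by
  // phiBS, thetaBS and etaModule, giving sizeX*(sizeY+1)+5.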
  int numberinputs=sizeX*(sizeY+1)+4;
  if (!useTrackEstimate)
  {
    numberinputs=sizeX*(sizeY+1)+5;
  }

  int numberoutputs=2*nParticlesTraining;

  if (nodesSecondLayer!=0)
  {
    nlayer=4;
  }

  if (nodesSecondLayer!=0)
  {
    nneurons=new int[4];
  }
  else
  {
    nneurons=new int[3];
  }

  nneurons[0]=numberinputs;

  nneurons[1]=nodesFirstLayer;

  if (nodesSecondLayer!=0)
  {
    nneurons[2]=nodesSecondLayer;
    nneurons[3]=numberoutputs;//number of output nodes
  }
  else
  {
    nneurons[2]=numberoutputs;//number of output nodes
  }

  for (int i=0;i<nlayer;i++)
  {
    cout << " layer i: " << i << " number neurons: " << nneurons[i] << endl;
  }

  // float eventWeight(0);
  float trainingError(0);
  float testError(0);

  //setting learning parameters

  cout << " now providing training events " << endl;

  Int_t numberTrainingEvents=0;
  Int_t numberTestingEvents=0;

  int iClus=0;
  int part_0=0;
  int part_1=0;
  int part_2=0;
  int part_3=0;

  int totalN=simu->GetEntries();

  // totalN=10000;

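  // First pass over the sample: count how many (cluster, particle) patterns
  // fall into the training slice (iClus%dilutionFactor==0) and the testing
  // slice (iClus%dilutionFactor==1); all other clusters are discarded, so
  // each slice holds roughly 1/dilutionFactor of the candidates.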
  // Loop over entries:
  for (Int_t i = 0; i < totalN; i++) {

    if (i % 50000 == 0 ) {
      std::cout << " Counting training / testing events in sample. Looping over event " << i << std::endl;
    }

    simu->GetEntry(i);

    for( unsigned int clus =0; clus<NN_sizeX->size(); clus++ ){

      vector<float> *matrixOfToT=0;
      vector<float> *vectorOfPitchesY=0;

      Float_t phiBS;
      Float_t thetaBS;
      Float_t etaModule;
      Int_t ClusterPixLayer;
      Int_t ClusterPixBarrelEC;

      std::vector<float> * positionX=0;
      std::vector<float> * positionY=0;
      std::vector<float> * thetaTr=0;
      std::vector<float> * phiTr=0;

      std::vector<float> positionX_reorder;
      std::vector<float> positionY_reorder;

      sizeX = (*NN_sizeX)[clus];
      positionX =&(*NN_positionX)[clus];
      int nParticles = positionX->size();

      thetaTr = &(*NN_theta)[clus];
      phiTr = &(*NN_phi)[clus];

      if (nParticlesTraining!=nParticles)
      {
        continue;
      }
      if (isBadCluster(sizeX, nParticles ) )continue;

      // loop over the particles;
      for( unsigned int P = 0; P < positionX->size(); P++){
        double theta = (*thetaTr)[P];
        if (theta!=theta) continue;  // skip NaN theta

        iClus++;

        if ( badTrackInfo(useTrackEstimate, theta ) )continue;

        if (iClus%dilutionFactor==0) numberTrainingEvents+=1;
        if (iClus%dilutionFactor==1) numberTestingEvents+=1;

        if (iClus%dilutionFactor==1 && nParticles==1 ) part_1++;
        if (iClus%dilutionFactor==1 && nParticles==2 ) part_2++;
        if (iClus%dilutionFactor==1 && nParticles==3 ) part_3++;

      }// end loop over the particles
    }// end loop over clusters
  }// end loop over entries

  cout << " N. training events: " << numberTrainingEvents <<
          " N. testing events: " << numberTestingEvents << endl;

  // if(numberTrainingEvents!=numberTestingEvents)return;

  cout << "now start to setup the network..." << endl;

  TJetNet* jn = new TJetNet( numberTestingEvents, numberTrainingEvents, nlayer, nneurons );
  // return;
  cout << " setting learning method... " << endl;

  // jn->SetMSTJN(4,12); // Fletcher-Reeves (scaled conjugate gradient)

  // int nPatternsPerUpdate=1;

  jn->SetPatternsPerUpdate( nPatternsPerUpdate );
  jn->SetUpdatesPerEpoch( (int)std::floor((float)numberTrainingEvents/(float)nPatternsPerUpdate) );
  jn->SetUpdatingProcedure( 0 );
  jn->SetErrorMeasure( 0 );
  jn->SetActivationFunction( 1 );

  jn->SetLearningRate( learningRate );//0.8
  // jn->SetLearningRate( 1 );//0.8
  // jn->SetMomentum( 0.2 );//0.3 //is now 0.5
  jn->SetMomentum( learningRateMomentum );//0.3 //is now 0.5
  jn->SetInitialWeightsWidth( 1. );
  // jn->SetLearningRateDecrease( 0.992 );

  jn->SetLearningRateDecrease( learningRateDecrease );//0.992
  // jn->SetLearningRateDecrease( 0.995 );//0.992
  jn->SetActivationFunction(4,nlayer-1-1);
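  // The second SetActivationFunction call overrides the activation for the
  // last layer (index nlayer-2); presumably this selects a different (e.g.
  // linear-like) response for the output nodes, as is common for position
  // regression.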

  cout << " setting pattern for training events " << endl;

  int trainSampleNumber=0;
  int testSampleNumber=1;

  cout << " copying over training events " << endl;

  int counter=0;
  int counter0=0;
  int counter1=0;

  //input and output of first event
  vector<double> inputVar;
  vector<double> outputVar;

  iClus=0;
  for (Int_t i = 0; i < totalN; i++) {

    if (i % 100000 == 0 ) {
      std::cout << " Copying over training events. Looping over event " << i << std::endl;
    }

    simu->GetEntry(i);

    for( unsigned int clus =0; clus<NN_sizeX->size(); clus++ ){

      vector<float> *matrixOfToT=0;
      vector<float> *vectorOfPitchesY=0;

      Float_t phiBS;
      Float_t thetaBS;
      Float_t etaModule;
      Int_t ClusterPixLayer;
      Int_t ClusterPixBarrelEC;

      std::vector<float> * position_idX=0;
      std::vector<float> * position_idY=0;
      std::vector<float> * thetaTr=0;
      std::vector<float> * phiTr=0;

      sizeX = (*NN_sizeX)[clus];

      sizeY = (*NN_sizeY)[clus];
      matrixOfToT=&(*NN_matrixOfToT)[clus];
      vectorOfPitchesY=&(*NN_vectorOfPitchesY)[clus];

      phiBS = (*NN_phiBS)[clus];
      thetaBS =(*NN_thetaBS)[clus];
      etaModule =(*NN_etaModule)[clus];

      ClusterPixLayer=(*NN_ClusterPixLayer)[clus];
      ClusterPixBarrelEC = (*NN_ClusterPixBarrelEC)[clus];

      position_idX =&(*NN_position_idX)[clus];
      position_idY =&(*NN_position_idY)[clus];

      int nParticles = position_idX->size();

      if (nParticlesTraining!=nParticles)
      {
        continue;
      }

      thetaTr = &(*NN_theta)[clus];
      phiTr = &(*NN_phi)[clus];

      if(isBadCluster(sizeX, nParticles ) )continue;

      for( unsigned int P = 0; P < position_idX->size(); P++){

        double theta = (*thetaTr)[P];
        double phi = (*phiTr)[P];
        if (theta!=theta) continue;  // skip NaN theta

        // mirror the angles for endcap C clusters
        if (ClusterPixBarrelEC==2)
        {
          theta=-theta;
          phi=-phi;
        }

        iClus++;

        if ( badTrackInfo(useTrackEstimate, theta ) )continue;

        if (matrixOfToT->size()!=(unsigned int)(sizeX*sizeY))
        {
          std::cout << " Event: " << i << " PROBLEM: matrix size is: " << matrixOfToT->size() << std::endl;
          throw std::runtime_error("Error in positions/trainNN.cxx");
        }

        // loop over elements of matrixOfToT, which is stored as a flat vector
        for(unsigned int ME =0; ME < matrixOfToT->size(); ME++){

          if (iClus%dilutionFactor==0) jn->SetInputTrainSet( counter0, ME, norm_ToT((*matrixOfToT)[ME]));
          if (iClus%dilutionFactor==1) jn->SetInputTestSet( counter1, ME, norm_ToT((*matrixOfToT)[ME]));

          if ((*matrixOfToT)[ME] != (*matrixOfToT)[ME])
          {
            cout << "ME n. " << ME << " is: " << (*matrixOfToT)[ME] << endl;
            throw std::runtime_error("Error in positions/trainNN.cxx");
          }

          if (counter0 == 0) std::cout << " element: " << ME << " ToT set to: " << norm_ToT((*matrixOfToT)[ME]) << std::endl;
          if (iClus%dilutionFactor==1&&counter1==0) inputVar.push_back(norm_ToT((*matrixOfToT)[ME]));
        }

        // loop over vector of pitches
        for (int s=0;s<sizeY;s++)
        {
          if (iClus%dilutionFactor==0) jn->SetInputTrainSet( counter0, sizeX*sizeY+s, norm_pitch((*vectorOfPitchesY)[s]));
          if (iClus%dilutionFactor==1) jn->SetInputTestSet( counter1, sizeX*sizeY+s, norm_pitch((*vectorOfPitchesY)[s]));

          if ((*vectorOfPitchesY)[s]!=(*vectorOfPitchesY)[s])
          {
            cout << " pitchY: " << (*vectorOfPitchesY)[s] << endl;
            throw std::runtime_error("Error in positions/trainNN.cxx");
          }
          if (counter0 == 0) std::cout << " s: " << s << " pitch set to: " << norm_pitch((*vectorOfPitchesY)[s]) << std::endl;

          if (iClus%dilutionFactor==1&&counter1==0) inputVar.push_back(norm_pitch((*vectorOfPitchesY)[s]));
        }

        if (iClus%dilutionFactor==0) jn->SetInputTrainSet( counter0, (sizeX+1)*sizeY, norm_layerNumber(ClusterPixLayer));
        if (iClus%dilutionFactor==0) jn->SetInputTrainSet( counter0, (sizeX+1)*sizeY+1, norm_layerType(ClusterPixBarrelEC));

        if (ClusterPixLayer!=ClusterPixLayer || ClusterPixBarrelEC!=ClusterPixBarrelEC)
        {
          cout << " ClusterPixLayer: " << ClusterPixLayer << " ClusterPixBarrelEC " << ClusterPixBarrelEC << endl;
          throw std::runtime_error("Error in positions/trainNN.cxx");
        }

        if (iClus%dilutionFactor==1) jn->SetInputTestSet( counter1, (sizeX+1)*sizeY, norm_layerNumber(ClusterPixLayer));
        if (iClus%dilutionFactor==1) jn->SetInputTestSet( counter1, (sizeX+1)*sizeY+1, norm_layerType(ClusterPixBarrelEC));

        if (iClus%dilutionFactor==1&&counter1==0) {
          inputVar.push_back(norm_layerNumber(ClusterPixLayer));
          inputVar.push_back(norm_layerType(ClusterPixBarrelEC));
        }

        if (counter0 == 0) std::cout << " ClusterPixLayer " << norm_layerNumber(ClusterPixLayer) << " ClusterPixBarrelEC " << norm_layerType(ClusterPixBarrelEC) << std::endl;

        if (useTrackEstimate)
        {
          if (iClus%dilutionFactor==0) jn->SetInputTrainSet( counter0, (sizeX+1)*sizeY+2, norm_phi(phi) );
          if (iClus%dilutionFactor==0) jn->SetInputTrainSet( counter0, (sizeX+1)*sizeY+3, norm_theta(theta) );

          if (phi!=phi) {
            cout << " phi: " << phi << endl;
            throw std::runtime_error("Error in positions/trainNN.cxx");
          }

          if (theta!=theta) {
            cout << " theta: " << theta << endl;
            throw std::runtime_error("Error in positions/trainNN.cxx");
          }

          if (iClus%dilutionFactor==1) jn->SetInputTestSet( counter1, (sizeX+1)*sizeY+2, norm_phi(phi) );
          if (iClus%dilutionFactor==1) jn->SetInputTestSet( counter1, (sizeX+1)*sizeY+3, norm_theta(theta) );

          if (counter0==0) std::cout << " phi " << norm_phi(phi) << " theta: " << norm_theta(theta) << std::endl;

          if (iClus%dilutionFactor==1&&counter1==0) {
            inputVar.push_back(norm_phi(phi));
            inputVar.push_back(norm_theta(theta));
          }
        }
        else
        {
          if (iClus%dilutionFactor==0) jn->SetInputTrainSet( counter0, (sizeX+1)*sizeY+2, norm_phiBS(phiBS) );
          if (iClus%dilutionFactor==0) jn->SetInputTrainSet( counter0, (sizeX+1)*sizeY+3, norm_thetaBS(thetaBS) );
          if (iClus%dilutionFactor==0) jn->SetInputTrainSet( counter0, (sizeX+1)*sizeY+4, norm_etaModule(etaModule) );

          if (iClus%dilutionFactor==1) jn->SetInputTestSet( counter1, (sizeX+1)*sizeY+2, norm_phiBS(phiBS) );
          if (iClus%dilutionFactor==1) jn->SetInputTestSet( counter1, (sizeX+1)*sizeY+3, norm_thetaBS(thetaBS) );
          if (iClus%dilutionFactor==1) jn->SetInputTestSet( counter1, (sizeX+1)*sizeY+4, norm_etaModule(etaModule) );

          if (iClus%dilutionFactor==1&&counter1==0) {
            inputVar.push_back(norm_phiBS(phiBS));
            inputVar.push_back(norm_thetaBS(thetaBS));
            inputVar.push_back(norm_etaModule(etaModule) );
          }

          if (counter0==0) std::cout <<
              " phiBS " << norm_phiBS(phiBS) <<
              " thetaBS: " << norm_thetaBS(thetaBS) <<
              " etaModule: " << norm_etaModule(etaModule) << std::endl;
        }

        vector<float> xPositions = *position_idX;

        // for (int e=0;e<nParticles;e++)
        // {
        //   xPositions.push_back((*positionsX)[e]);
        // }

        std::sort(xPositions.begin(),xPositions.end());

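        // Target encoding: the true x positions are sorted in ascending
        // order and written to the even output nodes (2*o); each particle's
        // matching y position goes to the following odd node (2*o+1).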
        for (unsigned int o=0;o<xPositions.size();o++)
        {
          if (iClus%dilutionFactor==0) jn->SetOutputTrainSet(counter0, 2*o, norm_posX(xPositions[o]));
          if (iClus%dilutionFactor==1) jn->SetOutputTestSet(counter1, 2*o, norm_posX(xPositions[o]));

          if (xPositions[o]!=xPositions[o])
          {
            cout << "pos: " << xPositions[o] << endl;
            throw std::runtime_error("Error in positions/trainNN.cxx");
          }

          if (counter0==0) std::cout << " output node: " << 2*o << " set to: " << norm_posX(xPositions[o]) << endl;

          if (iClus%dilutionFactor==1&&counter1==0) { outputVar.push_back(norm_posX(xPositions[o]));}

          // find the y position belonging to this (sorted) x position
          double corry=-1000;

          for (int e=0;e<nParticles;e++)
          {
            if (fabs((*position_idX)[e]-xPositions[o])<1e-10)
            {
              if (fabs(corry+1000)>1e-6)
              {
                cout << " Value found more than once! " << endl;
                for (unsigned int p=0;p<xPositions.size();p++)
                {
                  cout << " X n. : " << p << " is: " << xPositions[p] << endl;
                }
              }
              corry=(*position_idY)[e];
            }
          }

          if (fabs(corry+1000)<1e-6) {
            cout << " could not find original X pos. " << endl;
            throw std::runtime_error("Error in positions/trainNN.cxx");
          }

          if (iClus%dilutionFactor==0) jn->SetOutputTrainSet(counter0, 2*o+1, norm_posY(corry));
          if (iClus%dilutionFactor==1) jn->SetOutputTestSet(counter1, 2*o+1, norm_posY(corry));

          if (corry!=corry) {
            cout << " posY " << corry << endl;
            throw std::runtime_error("Error in positions/trainNN.cxx");
          }

          if (counter0==0) std::cout << " output node: " << 2*o+1 << " set to: " << norm_posY(corry) << endl;
          if (iClus%dilutionFactor==1&&counter1==0) { outputVar.push_back(norm_posY(corry));}

        }

        if (iClus%dilutionFactor==0) jn->SetEventWeightTrainSet( counter0, 1 );
        if (iClus%dilutionFactor==1) jn->SetEventWeightTestSet( counter1, 1 );

        if (iClus%dilutionFactor==0){counter0+=1;}
        if (iClus%dilutionFactor==1){counter1+=1;}

      }
    }
  }

  if (counter0!=numberTrainingEvents)
  {
    cout << " counter up to: " << counter0 << " while events in training sample are " << numberTrainingEvents << endl;
    return;
  }

  if (counter1!=numberTestingEvents)
  {
    cout << " counter up to: " << counter1 << " while events in testing sample are " << numberTestingEvents << endl;
    return;
  }

  // return;

  jn->Shuffle(true,false);

  if (restartTrainingFrom==0)
  {
    jn->Init();
    // jn->DumpToFile("WeightsInitial.w");
  }
  else
  {
    TString name("Weights");
    name+=restartTrainingFrom;
    name+=".w";

    jn->ReadFromFile(name);
  }

  float minimumError=1e10;
  int epochesWithRisingError=0;
  int epochWithMinimum=0;

  int updatesPerEpoch=jn->GetUpdatesPerEpoch();

  //prepare output stream

  TString directory("weights");
  directory+="_nPat";
  directory+=nPatternsPerUpdate;
  directory+="_rate";
  directory+=(int)(learningRate*100);
  directory+="_rdec";
  directory+=(int)(100*learningRateDecrease);
  directory+="_mom";
  directory+=(int)(100*learningRateMomentum);
  directory+="_";
  directory+=nParticlesTraining;
  if (useTrackEstimate)
  {
    directory+="_withTracks";
  }

  TString command("mkdir ");
  command+=directory;

  gSystem->Exec(command);

  TString nameCronology=directory;

  nameCronology+="/trainingCronology.txt";

  ofstream cronology(nameCronology,ios_base::out);//|ios_base::app);

  cronology << "-------------SETTINGS----------------" << endl;
  cronology << "Epochs: " << jn->GetEpochs() << std::endl;
  cronology << "Updates Per Epoch: " << jn->GetUpdatesPerEpoch() << std::endl;
  cronology << "Updating Procedure: " << jn->GetUpdatingProcedure() << std::endl;
  cronology << "Error Measure: " << jn->GetErrorMeasure() << std::endl;
  cronology << "Patterns Per Update: " << jn->GetPatternsPerUpdate() << std::endl;
  cronology << "Learning Rate: " << jn->GetLearningRate() << std::endl;
  cronology << "Momentum: " << jn->GetMomentum() << std::endl;
  cronology << "Initial Weights Width: " << jn->GetInitialWeightsWidth() << std::endl;
  cronology << "Learning Rate Decrease: " << jn->GetLearningRateDecrease() << std::endl;
  cronology << "Activation Function: " << jn->GetActivationFunction() << std::endl;
  cronology << "-------------LAYOUT------------------" << endl;
  cronology << "Input variables: " << jn->GetInputDim() << endl;
  cronology << "Output variables: " << jn->GetOutputDim() << endl;
  cronology << "Hidden layers: " << jn->GetHiddenLayerDim() << endl;
  cronology << "Layout : ";
  for (Int_t s=0;s<jn->GetHiddenLayerDim()+2;++s)
  {
    cronology << jn->GetHiddenLayerSize(s);
    if (s<jn->GetHiddenLayerDim()+1) cronology << "-";
  }
  cronology << endl;
  cronology << "--------------HISTORY-----------------" << endl;
  cronology << "History of iterations: " << endl;
  cronology.close();

  //prepare training histograms
  TH1F* histoTraining=new TH1F("training","training",(int)std::floor((float)nIterations/10.+0.5),1,std::floor((float)nIterations/10.+1.5));
  TH1F* histoTesting=new TH1F("testing","testing",(int)std::floor((float)nIterations/10.+0.5),1,std::floor((float)nIterations/10.+1.5));

  double maximumTrain=0;
  double minimumTrain=1e10;

  for(int epoch=restartTrainingFrom+1;epoch<=nIterations;++epoch)
  {
    if (epoch!=restartTrainingFrom+1)
    {
      trainingError = jn->Train();
    }

    if (epoch%10==0 || epoch==restartTrainingFrom+1)
    {
      cronology.open(nameCronology,ios_base::app);

      testError = jn->Test();

      if (trainingError>maximumTrain) maximumTrain=trainingError;
      if (testError>maximumTrain) maximumTrain=testError;
      if (trainingError<minimumTrain) minimumTrain=trainingError;
      if (testError<minimumTrain) minimumTrain=testError;

      histoTraining->Fill(epoch/10.,trainingError);
      histoTesting->Fill(epoch/10.,testError);

      if (testError<minimumError)
      {
        minimumError=testError;
        epochesWithRisingError=0;
        epochWithMinimum=epoch;
      }
      else
      {
        epochesWithRisingError+=10;
      }

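      // Early stopping: once the test error has not improved for more than
      // 300 epochs (checked every 10) and the training error has dropped
      // below the best test error, training is aborted.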
      if (epochesWithRisingError>300)
      {
        if (trainingError<minimumError)
        {
          cout << " End of training. Minimum already on epoch: " << epochWithMinimum << endl;
          cronology << " End of training. Minimum already on epoch: " << epochWithMinimum << endl;
          break;
        }
      }

      cronology << "Epoch: [" << epoch <<
          "] Error: " << trainingError <<
          " Test: " << testError << endl;

      cout << "Epoch: [" << epoch <<
          "] Error: " << trainingError <<
          " Test: " << testError << endl;

      cronology.close();

      TString name=directory;
      name+="/Weights";
      name+=epoch;
      name+=".root";

      TFile* file=new TFile(name,"recreate");
      TTrainedNetwork* trainedNetwork=jn->createTrainedNetwork();

      jn->Evaluate( 0 ); //evaluate the first test pattern
      for (int z=0;z<nParticlesTraining;z++)
      {
        std::cout << "output "<<z<<" x: " << jn->GetOutput(2*z) << " y: " << jn->GetOutput(2*z+1) << endl;
      }

      std::vector<double> myTestOutput=trainedNetwork->calculateOutputValues(inputVar);
      for (int z=0;z<nParticlesTraining;z++)
      {
        std::cout << "output TTNet "<<z<<" x: " << myTestOutput[2*z] <<
            " y: " << myTestOutput[2*z+1] << endl;
      }
      for (int z=0;z<nParticlesTraining;z++)
      {
        std::cout << "should be " << z << " x: " << outputVar[2*z] << " y: " << outputVar[2*z+1] << endl;
      }

      trainedNetwork->Write();
      file->Write();
      file->Close();
      delete file;

      /*
      TFile* file2=new TFile(name);
      trainedNetwork=(TTrainedNetwork*)file2->Get("TTrainedNetwork");
      cout <<" hid lay 1 size: " << trainedNetwork->getnHiddenLayerSize()[0] << endl;
      file2->Close();
      delete file2;
      */

      // jn->DumpToFile(name);
    }
  }

  jn->writeNetworkInfo(1);
  jn->writeNetworkInfo(2);
  // jn->writeNetworkInfo(3);
  // jn->writeNetworkInfo(4);
  // jn->writeNetworkInfo(5);

  // cout << " Now try to understand how to get the weights..." << endl;

  Int_t nInput=jn->GetInputDim();

  cout << " create Trained Network object..." << endl;

  TTrainedNetwork* trainedNetwork=jn->createTrainedNetwork();

  /*
  cout << " now getting value with trained Network ";

  double inputexample[9]={norm_nVTX(1),
                          norm_nTracksAtVtx(2),
                          norm_nSingleTracks(0),
                          norm_energyFraction(0.6),
                          norm_mass(2500),
                          norm_significance3d(4 ),
                          norm_IP3D(3),
                          norm_cat_pT(3),
                          norm_cat_eta(1)};

  for (Int_t i=0;i<nInput;++i)
  {
    jn->SetInputs(i,inputexample[i]);
  }

  cronology.open("weights/trainingCronology.txt",ios_base::app);

  jn->Evaluate();

  cronology << "----------------CONSISTENCY CHECK-----------" << endl;
  cout << "Result 0:" << jn->GetOutput(0);
  cronology << "Result 0:" << jn->GetOutput(0);
  cout << " Result 1:" << jn->GetOutput(1);
  cronology << "Result 1:" << jn->GetOutput(1);
  cout << " Result 2:" << jn->GetOutput(2) << endl;
  cronology << " Result 2:" << jn->GetOutput(2) << endl;

  cout << " Reading back old network " << endl;
  jn->readBackTrainedNetwork(trainedNetwork);

  cout <<" resetting input " << endl;
  for (Int_t i=0;i<nInput;++i)
  {
    jn->SetInputs(i,inputexample[i]);
  }

  jn->Evaluate();

  cout << "After reading back - Result 0:" << jn->GetOutput(0);
  cronology << "After reading back - Result 0:" << jn->GetOutput(0);
  // << " my: " << result[0] << endl;
  cout << " After reading back - Result 1:" << jn->GetOutput(1);
  cronology << "After reading back - Result 1:" << jn->GetOutput(1);
  //<< " my: " << result[1] << endl;
  cout << " After reading back - Result 2:" << jn->GetOutput(2) << endl;
  cronology << "After reading back - Result 2:" << jn->GetOutput(2);
  // << " my: " << result[2] << endl;
  */

  cout << " Now getting histograms from trainingResult" << endl;
  cronology << " Now getting histograms from trainingResult" << endl;
  TNetworkToHistoTool myHistoTool;

  cout << " From network to histo..." << endl;
  std::vector<TH1*> myHistos=myHistoTool.fromTrainedNetworkToHisto(trainedNetwork);

  cout << " From histo to network back..." << endl;
  TTrainedNetwork* trainedNetwork2=myHistoTool.fromHistoToTrainedNetwork(myHistos);

  cout << " reading back " << endl;
  jn->readBackTrainedNetwork(trainedNetwork2);

  /*
  cout <<" resetting input " << endl;
  for (Int_t i=0;i<nInput;++i)
  {
    jn->SetInputs(i,inputexample[i]);
  }

  jn->Evaluate();

  cout << "After reading back - Result 0:" << jn->GetOutput(0);
  cronology << "After reading back - Result 0:" << jn->GetOutput(0);
  // << " my: " << result[0] << endl;
  cout << " After reading back - Result 1:" << jn->GetOutput(1);
  cronology << "After reading back - Result 1:" << jn->GetOutput(1);
  //<< " my: " << result[1] << endl;
  cout << " After reading back - Result 2:" << jn->GetOutput(2) << endl;
  cronology << "After reading back - Result 2:" << jn->GetOutput(2);
  // << " my: " << result[2] << endl;

  cout << " Directly from the trainedNetwork read back from HISTOS...!" << endl;

  std::vector<Double_t> inputData;
  for (Int_t u=0;u<nInput;++u)
  {
    inputData.push_back(inputexample[u]);
  }

  std::vector<Double_t> outputData=trainedNetwork2->calculateOutputValues(inputData);

  cout << "After reading back - Result 0:" << outputData[0] << endl;
  cout << " After reading back - Result 1:" << outputData[1] << endl;
  cout << " After reading back - Result 2:" << outputData[2] << endl;
  */

  if (epochWithMinimum!=0)
  {
    cronology << "Minimum stored from Epoch: " << epochWithMinimum << endl;
  } else
  {
    cronology << "Minimum not reached" << endl;
  }

  cronology.close();

  if (epochWithMinimum!=0)
  {
    TString name=directory;
    name+="/Weights";
    name+=epochWithMinimum;
    name+=".root";
    std::cout << " reading back from minimum " << endl;

    TFile *_file0 = new TFile(name);
    TTrainedNetwork* trainedNetwork=(TTrainedNetwork*)_file0->Get("TTrainedNetwork");

    cout << " Reading back network with minimum" << endl;
    jn->readBackTrainedNetwork(trainedNetwork);

    TString nameFile=directory;
    nameFile+="/weightMinimum.root";

    TFile* file=new TFile(nameFile,"recreate");
    trainedNetwork->Write();
    file->Write();
    file->Close();
    delete file;

    cout << " -------------------- " << endl;
    cout << " Writing OUTPUT histos " << endl;
    TString histoFName=directory;
    histoFName+="/histoWeights.root";

    TFile* fileHistos=new TFile(histoFName,"recreate");
    TNetworkToHistoTool histoTool;
    std::vector<TH1*> myHistos=histoTool.fromTrainedNetworkToHisto(trainedNetwork);
    std::vector<TH1*>::const_iterator histoBegin=myHistos.begin();
    std::vector<TH1*>::const_iterator histoEnd=myHistos.end();
    for (std::vector<TH1*>::const_iterator histoIter=histoBegin;
         histoIter!=histoEnd;++histoIter)
    {
      (*histoIter)->Write();
    }
    fileHistos->Write();
    fileHistos->Close();
    delete fileHistos;

    // " filename: " << name << endl;

    // jn->ReadFromFile(name);
  }
  else
  {
    cout << " using network at last iteration (minimum not reached)..." << endl;
  }

  //here you should create the class... Still open how to deal with this...
  // char* myname=const_cast<char*>(static_cast<const char*>(outputclass));
  // ierr=mlpsavecf_(myname);

  TString histoTName=directory;
  histoTName+="/trainingInfo.root";

  TFile* histoFile=new TFile(histoTName,"recreate");
  histoTraining->Write();
  histoTesting->Write();
  histoFile->Write();
  histoFile->Close();
  delete histoFile;

  TCanvas* trainingCanvas=new TCanvas("trainingCanvas","trainingCanvas");
  histoTraining->SetLineColor(2);
  histoTesting->SetLineColor(4);

  histoTraining->GetYaxis()->SetRangeUser(minimumTrain,maximumTrain);
  histoTraining->Draw("l");
  histoTesting->Draw("lsame");
  TString canvasName=directory;
  canvasName+="/trainingCurve.eps";
  trainingCanvas->SaveAs(canvasName);

  TCanvas* mlpa_canvas = new TCanvas("jetnet_canvas","Network analysis");
  mlpa_canvas->Divide(2,4);

  /*
  // TCanvas* mlpa_canvas_5=gDirectory->Get("mlpa_canvas_5");
  // mlpa_canvas_5->SetLogy(kTrue);
  gPad->SetLogy();

  // Use the NN to plot the results for each sample
  // This will give approx. the same result as DrawNetwork.
  // All entries are used, while DrawNetwork focuses on
  // the test sample. Also the xaxis range is manually set.
  TH1F *bg2 = new TH1F("bg2h", "NN output", 50, -.5, 1.5);
  TH1F *bg = new TH1F("bgh", "NN output", 50, -.5, 1.5);
  TH1F *sig = new TH1F("sigh", "NN output", 50, -.5, 1.5);

  //sig = 1 part; bg = 2 part; bg2 = 3 part

  TH1F *bg2test = new TH1F("bg2htest", "NN output", 50, -.5, 1.5);
  TH1F *bgtest = new TH1F("bghtest", "NN output", 50, -.5, 1.5);
  TH1F *sigtest = new TH1F("sightest", "NN output", 50, -.5, 1.5);

  int weight=1;

  for (Int_t i = 0; i < totalN; i++) {

    if (i % 100000 == 0 ) {
      std::cout << " First plot. Looping over event " << i << std::endl;
    }

    if (i%dilutionFactor!=0&&i%dilutionFactor!=1) continue;

    simu->GetEntry(i);

    for (int u=0;u<sizeX;u++)
    {
      for (int s=0;s<sizeY;s++)
      {
        jn->SetInputs( s+u*sizeY, norm_ToT((*matrixOfToT)[u][s]));
      }
    }
    for (int s=0;s<sizeY;s++)
    {
      jn->SetInputs( sizeX*sizeY+s, norm_pitch((*vectorOfPitchesY)[s]));
    }

    jn->SetInputs( (sizeX+1)*sizeY, norm_phi(phi) );
    jn->SetInputs( (sizeX+1)*sizeY+1, norm_theta(theta) );

    jn->Evaluate();

    float p1=jn->GetOutput(0);
    float p2=jn->GetOutput(1);
    float p3=jn->GetOutput(2);

    if (nParticles==1)
    {
      if (i%dilutionFactor==0)
      {
        sig->Fill(p1/(p1+p2+p3),weight);
      }
      else if (i%dilutionFactor==1)
      {
        sigtest->Fill(p1/(p1+p2+p3),weight);
      }
    }
    if (nParticles==2)
    {
      if (i%dilutionFactor==0)
      {
        bg->Fill(p1/(p1+p2+p3),weight);
      }
      else if (i%dilutionFactor==1)
      {
        bgtest->Fill(p1/(p1+p2+p3),weight);
      }
    }
    if (nParticles>=3)
    {
      if (i%dilutionFactor==0)
      {
        bg2->Fill(p1/(p1+p2+p3),weight);
      }
      else if (i%dilutionFactor==1)
      {
        bg2test->Fill(p1/(p1+p2+p3),weight);
      }
    }
  }

  //now you need the maximum
  float maximum=1;
  for (Int_t a=0;a<bg->GetNbinsX();a++)
  {
    if (bg->GetBinContent(a)>maximum)
    {
      maximum=1.2*bg->GetBinContent(a);
    }
  }

  bg2->SetLineColor(kYellow);
  bg2->SetFillStyle(3008); bg2->SetFillColor(kYellow);
  bg->SetLineColor(kBlue);
  bg->SetFillStyle(3008); bg->SetFillColor(kBlue);
  sig->SetLineColor(kRed);
  sig->SetFillStyle(3003); sig->SetFillColor(kRed);
  bg2->SetStats(0);
  bg->SetStats(0);
  sig->SetStats(0);

  bg2test->SetLineColor(kYellow);
  bg2test->SetFillStyle(3008); bg2test->SetFillColor(kYellow);
  bgtest->SetLineColor(kBlue);
  bgtest->SetFillStyle(3008); bgtest->SetFillColor(kBlue);
  sigtest->SetLineColor(kRed);
  sigtest->SetFillStyle(3003); sigtest->SetFillColor(kRed);
  bg2test->SetStats(0);
  bgtest->SetStats(0);
  sigtest->SetStats(0);

  mlpa_canvas->cd(1);
  gPad->SetLogy();

  bg->GetYaxis()->SetRangeUser(1,maximum);
  bgtest->GetYaxis()->SetRangeUser(1,maximum);

  mlpa_canvas->cd(1);
  bg->Draw();
  bg2->Draw("same");
  sig->Draw("same");

  TLegend *legend = new TLegend(.75, .80, .95, .95);
  legend->AddEntry(bg2, "particles >=3");
  legend->AddEntry(bg, "particles = 2");
  legend->AddEntry(sig, "particles = 1");
  legend->Draw();

  mlpa_canvas->cd(2);
  gPad->SetLogy();

  bgtest->Draw();
  bg2test->Draw("same");
  sigtest->Draw("same");

  TLegend *legendtest = new TLegend(.75, .80, .95, .95);
  legendtest->AddEntry(bg2test, "particles >=3");
  legendtest->AddEntry(bgtest, "particles = 2");
  legendtest->AddEntry(sigtest, "particles = 1");
  legendtest->Draw();

  mlpa_canvas->cd(5);
  gPad->SetLogy();
  bg->DrawNormalized();
  bg2->DrawNormalized("same");
  sig->DrawNormalized("same");
  legend->Draw();

  mlpa_canvas->cd(6);
  gPad->SetLogy();
  bgtest->DrawNormalized();
  bg2test->DrawNormalized("same");
  sigtest->DrawNormalized("same");
  legendtest->Draw();

  mlpa_canvas->cd(3);
  gPad->SetLogy();

  // Use the NN to plot the results for each sample
  // This will give approx. the same result as DrawNetwork.
  // All entries are used, while DrawNetwork focuses on
  // the test sample. Also the xaxis range is manually set.
  TH1F *c_bg2 = new TH1F("c_bg2h", "NN output", 50, -.5, 1.5);
  TH1F *c_bg = new TH1F("c_bgh", "NN output", 50, -.5, 1.5);
  TH1F *c_sig = new TH1F("c_sigh", "NN output", 50, -.5, 1.5);

  TH1F *c_bg2test = new TH1F("c_bg2htest", "NN output", 50, -.5, 1.5);
  TH1F *c_bgtest = new TH1F("c_bghtest", "NN output", 50, -.5, 1.5);
  TH1F *c_sigtest = new TH1F("c_sightest", "NN output", 50, -.5, 1.5);

  for (Int_t i = 0; i < totalN; i++) {

    if (i % 100000 == 0 ) {
      std::cout << " Second plot. Looping over event " << i << std::endl;
    }

    if (i%dilutionFactor!=0&&i%dilutionFactor!=1) continue;

    simu->GetEntry(i);

    for (int u=0;u<sizeX;u++)
    {
      for (int s=0;s<sizeY;s++)
      {
        jn->SetInputs( s+u*sizeY, norm_ToT((*matrixOfToT)[u][s]));
      }
    }
    for (int s=0;s<sizeY;s++)
    {
      jn->SetInputs( sizeX*sizeY+s, norm_pitch((*vectorOfPitchesY)[s]));
    }

    jn->SetInputs( (sizeX+1)*sizeY, norm_phi(phi) );
    jn->SetInputs( (sizeX+1)*sizeY+1, norm_theta(theta) );

    jn->Evaluate();

    float p1=jn->GetOutput(0);
    float p2=jn->GetOutput(1);
    float p3=jn->GetOutput(2);

    float discr=(p1+p2)/(p1+p2+p3);

    if (nParticles==1)
    {
      if (i%dilutionFactor==0)
      {
        c_sig->Fill(discr,weight);
      }
      else if (i%dilutionFactor==1)
      {
        c_sigtest->Fill(discr,weight);
      }
    }
    if (nParticles==2)
    {
      if (i%dilutionFactor==0)
      {
        c_bg->Fill(discr,weight);
      }
      else if (i%dilutionFactor==1)
      {
        c_bgtest->Fill(discr,weight);
      }
    }
    if (nParticles>=3)
    {
      if (i%dilutionFactor==0)
      {
        c_bg2->Fill(discr,weight);
      }
      else if (i%dilutionFactor==1)
      {
        c_bg2test->Fill(discr,weight);
      }
    }
  }

  //now you need the maximum
  maximum=1;
  for (Int_t a=0;a<c_bg->GetNbinsX();a++)
  {
    if (c_bg->GetBinContent(a)>maximum)
    {
      maximum=1.2*c_bg->GetBinContent(a);
    }
  }

  c_bg2->SetLineColor(kYellow);
  c_bg2->SetFillStyle(3008); c_bg2->SetFillColor(kYellow);
  c_bg->SetLineColor(kBlue);
  c_bg->SetFillStyle(3008); c_bg->SetFillColor(kBlue);
  c_sig->SetLineColor(kRed);
  c_sig->SetFillStyle(3003); c_sig->SetFillColor(kRed);
  c_bg2->SetStats(0);
  c_bg->SetStats(0);
  c_sig->SetStats(0);

  c_bg2test->SetLineColor(kYellow);
  c_bg2test->SetFillStyle(3008); c_bg2test->SetFillColor(kYellow);
  c_bgtest->SetLineColor(kBlue);
  c_bgtest->SetFillStyle(3008); c_bgtest->SetFillColor(kBlue);
  c_sigtest->SetLineColor(kRed);
  c_sigtest->SetFillStyle(3003); c_sigtest->SetFillColor(kRed);
  c_bg2test->SetStats(0);
  c_bgtest->SetStats(0);
  c_sigtest->SetStats(0);

  mlpa_canvas->cd(3);
  gPad->SetLogy();

  c_bg->GetYaxis()->SetRangeUser(1,maximum);
  c_bgtest->GetYaxis()->SetRangeUser(1,maximum);

  c_bg->Draw();
  c_bg2->Draw("same");
  c_sig->Draw("same");

  TLegend *legend2 = new TLegend(.75, .80, .95, .95);
  legend2->AddEntry(c_bg2, "particles >=3");
  legend2->AddEntry(c_bg, "particles = 2");
  legend2->AddEntry(c_sig, "particles = 1");
  legend2->Draw();

  mlpa_canvas->cd(4);
  gPad->SetLogy();

  c_bgtest->Draw();
  c_bg2test->Draw("same");
  c_sigtest->Draw("same");

  TLegend *legend2test = new TLegend(.75, .80, .95, .95);
  legend2test->AddEntry(c_bg2test, "particles >=3");
  legend2test->AddEntry(c_bgtest, "particles = 2");
  legend2test->AddEntry(c_sigtest, "particles = 1");
  legend2test->Draw();

  mlpa_canvas->cd(7);
  gPad->SetLogy();
  c_bg->DrawNormalized();
  c_bg2->DrawNormalized("same");
  c_sig->DrawNormalized("same");
  legend2->Draw();

  mlpa_canvas->cd(8);
  gPad->SetLogy();
  c_bgtest->DrawNormalized();
  c_bg2test->DrawNormalized("same");
  c_sigtest->DrawNormalized("same");
  legend2test->Draw();

  mlpa_canvas->cd(0);

  mlpa_canvas->SaveAs("weights/result.eps");
  */
}