// G4ConvergenceTester.cc  (Geant4 9.6.0)
//
// ********************************************************************
// * License and Disclaimer                                           *
// *                                                                  *
// * The Geant4 software is copyright of the Copyright Holders of     *
// * the Geant4 Collaboration. It is provided under the terms and     *
// * conditions of the Geant4 Software License, included in the file  *
// * LICENSE and available at http://cern.ch/geant4/license . These   *
// * include a list of copyright holders.                             *
// *                                                                  *
// * Neither the authors of this software system, nor their employing *
// * institutes,nor the agencies providing financial support for this *
// * work make any representation or warranty, express or implied,    *
// * regarding this software system or assume any liability for its   *
// * use. Please see the license in the file LICENSE and URL above    *
// * for the full disclaimer and the limitation of liability.         *
// *                                                                  *
// * This code implementation is the result of the scientific and     *
// * technical work of the GEANT4 collaboration.                      *
// * By using, copying, modifying or distributing the software (or    *
// * any work based on the software) you agree to acknowledge its     *
// * use in resulting scientific publications, and indicate your      *
// * acceptance of all terms of the Geant4 Software license.          *
// ********************************************************************
//
//
//
// Convergence Tests for Monte Carlo results.
//
// Reference
// MCNP(TM) - A General Monte Carlo N-Particle Transport Code
//            Version 4B
// Judith F. Briesmeister, Editor
// LA-12625-M, Issued: March 1997, UC 705 and UC 700
// CHAPTER 2. GEOMETRY, DATA, PHYSICS, AND MATHEMATICS
// VI. ESTIMATION OF THE MONTE CARLO PRECISION
//
// Positive numbers are assumed for inputs
//
// Koi, Tatsumi (SLAC/SCCS)
//
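// ----------------------------------------------------------------------
// Illustrative usage sketch (not part of the original file, excluded from
// the build): how user code might drive this tester.  The function name
// ExampleConvergenceCheck and the dummy tally are invented for
// illustration only; the G4ConvergenceTester calls are the ones defined
// in this file.
#if 0
#include "G4ConvergenceTester.hh"

void ExampleConvergenceCheck( G4int nEvents )
{
   G4ConvergenceTester tester;

   for ( G4int i = 0 ; i < nEvents ; i++ )
   {
      // In real user code this would be the tally of one event/history.
      G4double score = ( i % 10 == 0 ) ? 1.0 : 0.0;  // dummy placeholder tally

      tester.AddScore( score );   // call once per history, zero scores included
   }

   tester.ShowResult();    // mean, r, VOV, FOM, SLOPE and the pass/fail summary
   tester.ShowHistory();   // the same statistics on the 1/16 ... 16/16 grid
}
#endif
// ----------------------------------------------------------------------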

#include "G4ConvergenceTester.hh"

G4ConvergenceTester::G4ConvergenceTester()
  : n(0), sum(0.), mean(0.), var(0.), sd(0.), r(0.), efficiency(0.),
    r2eff(0.), r2int(0.), shift(0.), vov(0.), fom(0.), largest(0.),
    largest_score_happened(0), mean_1(0.), var_1(0.), sd_1(0.), r_1(0.),
    shift_1(0.), vov_1(0.), fom_1(0.), noBinOfHistory(16), slope(0.),
    noBinOfPDF(10), minimizer(0), noPass(0), noTotal(8)
{
   nonzero_histories.clear();
   largest_scores.clear();
   largest_scores.push_back( 0.0 );

   history_grid.resize( noBinOfHistory , 0 );
   mean_history.resize( noBinOfHistory , 0.0 );
   var_history.resize( noBinOfHistory , 0.0 );
   sd_history.resize( noBinOfHistory , 0.0 );
   r_history.resize( noBinOfHistory , 0.0 );
   vov_history.resize( noBinOfHistory , 0.0 );
   fom_history.resize( noBinOfHistory , 0.0 );
   shift_history.resize( noBinOfHistory , 0.0 );
   e_history.resize( noBinOfHistory , 0.0 );
   r2eff_history.resize( noBinOfHistory , 0.0 );
   r2int_history.resize( noBinOfHistory , 0.0 );

   timer = new G4Timer();
   timer->Start();
   cpu_time.clear();
   cpu_time.push_back( 0.0 );
}


G4ConvergenceTester::~G4ConvergenceTester()
{
   delete timer;
}


void G4ConvergenceTester::AddScore( G4double x )
{

   //G4cout << x << G4endl;

   timer->Stop();
   cpu_time.push_back( timer->GetSystemElapsed() + timer->GetUserElapsed() );

   if ( x == 0.0 )
   {
   }
   else
   {
      nonzero_histories.insert( std::pair< G4int , G4double > ( n , x ) );
      if ( x > largest_scores.back() )
      {
         // The following search would be faster if it started from the bottom.
         std::vector< G4double >::iterator it;
         for ( it = largest_scores.begin() ; it != largest_scores.end() ; it++ )
         {
            if ( x > *it )
            {
               largest_scores.insert( it , x );
               break;
            }
         }

         if ( largest_scores.size() > 201 )
         {
            largest_scores.pop_back();
         }
         //G4cout << largest_scores.size() << " " << largest_scores.front() << " " << largest_scores.back() << G4endl;
      }
      sum += x;
   }

   n++;
   return;
}
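// ----------------------------------------------------------------------
// Note: largest_scores keeps at most the 201 largest non-zero scores, in
// descending order, seeded with a single 0.0.  The top 5% of the non-zero
// scores are later handed to calc_slope_fit() to estimate SLOPE.
// ----------------------------------------------------------------------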


void G4ConvergenceTester::calStat()
{

   efficiency = double( nonzero_histories.size() ) / n;

   mean = sum / n;

   G4double sum_x2 = 0.0;
   var = 0.0;
   shift = 0.0;
   vov = 0.0;

   G4double xi;
   std::map< G4int , G4double >::iterator it;
   for ( it = nonzero_histories.begin() ; it != nonzero_histories.end() ; it++ )
   {
      xi = it->second;
      sum_x2 += xi * xi;
      var += ( xi - mean ) * ( xi - mean );
      shift += ( xi - mean ) * ( xi - mean ) * ( xi - mean );
      vov += ( xi - mean ) * ( xi - mean ) * ( xi - mean ) * ( xi - mean );
   }

   var += ( n - nonzero_histories.size() ) * mean * mean;
   shift += ( n - nonzero_histories.size() ) * mean * mean * mean * ( -1 );
   vov += ( n - nonzero_histories.size() ) * mean * mean * mean * mean;

   vov = vov / ( var * var ) - 1.0 / n;

   var = var / ( n - 1 );

   sd = std::sqrt ( var );

   r = sd / mean / std::sqrt ( G4double(n) );

   r2eff = ( 1 - efficiency ) / ( efficiency * n );
   r2int = sum_x2 / ( sum * sum ) - 1 / ( efficiency * n );

   shift = shift / ( 2 * var * n );

   fom = 1 / ( r * r ) / cpu_time.back();

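// ----------------------------------------------------------------------
// For reference, the quantities computed above correspond to the MCNP 4B
// estimators cited in the file header (sums run over all N histories;
// the zero-score histories are added analytically through the
// "( n - nonzero_histories.size() ) * mean^k" terms):
//
//   xbar  = (1/N) sum_i x_i                                     (mean)
//   S^2   = (1/(N-1)) sum_i ( x_i - xbar )^2                    (var)
//   R     = S / ( xbar * sqrt(N) )                              (r)
//   R2eff = ( 1 - eff ) / ( eff * N )                           (r2eff)
//   R2int = sum_i x_i^2 / ( sum_i x_i )^2 - 1 / ( eff * N )     (r2int)
//   VOV   = sum_i ( x_i - xbar )^4
//           / [ sum_i ( x_i - xbar )^2 ]^2  -  1/N              (vov)
//   SHIFT = sum_i ( x_i - xbar )^3 / ( 2 * S^2 * N )            (shift)
//   FOM   = 1 / ( R^2 * T ),  T = accumulated CPU time          (fom)
// ----------------------------------------------------------------------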
// Find Largest History
   //G4double largest = 0.0;
   largest = 0.0;
   largest_score_happened = 0;
   G4double spend_time_of_largest = 0.0;
   for ( it = nonzero_histories.begin() ; it != nonzero_histories.end() ; it++ )
   {
      if ( std::abs ( it->second ) > largest )
      {
         largest = it->second;
         largest_score_happened = it->first;
         spend_time_of_largest = cpu_time [ it->first+1 ] - cpu_time [ it->first ];
      }
   }

   mean_1 = 0.0;
   var_1 = 0.0;
   shift_1 = 0.0;
   vov_1 = 0.0;
   sd_1 = 0.0;
   r_1 = 0.0;
   vov_1 = 0.0;

// G4cout << "The largest history = " << largest << G4endl;

   mean_1 = ( sum + largest ) / ( n + 1 );

   for ( it = nonzero_histories.begin() ; it != nonzero_histories.end() ; it++ )
   {
      xi = it->second;
      var_1 += ( xi - mean_1 ) * ( xi - mean_1 );
      shift_1 += ( xi - mean_1 ) * ( xi - mean_1 ) * ( xi - mean_1 );
      vov_1 += ( xi - mean_1 ) * ( xi - mean_1 ) * ( xi - mean_1 ) * ( xi - mean_1 );
   }
   xi = largest;
   var_1 += ( xi - mean_1 ) * ( xi - mean_1 );
   shift_1 += ( xi - mean_1 ) * ( xi - mean_1 ) * ( xi - mean_1 );
   vov_1 += ( xi - mean_1 ) * ( xi - mean_1 ) * ( xi - mean_1 ) * ( xi - mean_1 );

   var_1 += ( n - nonzero_histories.size() ) * mean_1 * mean_1;
   shift_1 += ( n - nonzero_histories.size() ) * mean_1 * mean_1 * mean_1 * ( -1 );
   vov_1 += ( n - nonzero_histories.size() ) * mean_1 * mean_1 * mean_1 * mean_1;

   vov_1 = vov_1 / ( var_1 * var_1 ) - 1.0 / ( n + 1 );

   var_1 = var_1 / n;

   sd_1 = std::sqrt ( var_1 );

   r_1 = sd_1 / mean_1 / std::sqrt ( G4double(n + 1) );

   shift_1 = shift_1 / ( 2 * var_1 * ( n + 1 ) );

   fom_1 = 1 / ( r * r ) / ( cpu_time.back() + spend_time_of_largest );

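// ----------------------------------------------------------------------
// The "_1" quantities above are, as I read the code, the MCNP-style
// robustness check: the statistics are recomputed as if one additional
// history with a score equal to the largest observed score were added,
// e.g.  mean_1 = ( sum + largest ) / ( n + 1 ).  Large ratios
// mean_1/mean, var_1/var, r_1/r, ... printed by ShowResult() indicate
// that the tally is still dominated by rare large scores.
// ----------------------------------------------------------------------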
   if ( nonzero_histories.size() < 500 )
   {
      G4cout << "Number of non-zero histories is too small to calculate SLOPE" << G4endl;
   }
   else
   {
      G4int i = int ( nonzero_histories.size() );

      // 5% criterion
      G4int j = int ( i * 0.05 );
      while ( int( largest_scores.size() ) > j )
      {
         largest_scores.pop_back();
      }
      calc_slope_fit( largest_scores );
   }

   calc_grid_point_of_history();
   calc_stat_history();
}


void G4ConvergenceTester::calc_grid_point_of_history()
{

// history_grid [ 0,,,15 ]
// history_grid [0] covers 1/16 of the histories ,,, history_grid [15] covers 16/16
// If the number of events is x, then history_grid [15] becomes x-1.
// 16 -> noBinOfHistory

   G4int i;
   for ( i = 1 ; i <= noBinOfHistory ; i++ )
   {
      history_grid [ i-1 ] = int ( n / ( double( noBinOfHistory ) ) * i - 0.1 );
      //G4cout << "history_grid " << i-1 << " " << history_grid [ i-1 ] << G4endl;
   }

}
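// ----------------------------------------------------------------------
// Worked example (for illustration only): with n = 1000 histories and
// noBinOfHistory = 16 the grid above becomes
//   history_grid = { 62, 124, 187, 249, ... , 937, 999 }
// i.e. the last grid point is always the index of the final history, n-1.
// ----------------------------------------------------------------------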


void G4ConvergenceTester::calc_stat_history()
{
// G4cout << "i/16 till_ith mean var sd r vov fom shift e r2eff r2int" << G4endl;

   G4int i;
   for ( i = 1 ; i <= noBinOfHistory ; i++ )
   {

      G4int ith = history_grid [ i-1 ];

      G4int nonzero_till_ith = 0;
      G4double xi;
      G4double mean_till_ith = 0.0;
      std::map< G4int , G4double >::iterator it;

      for ( it = nonzero_histories.begin() ; it != nonzero_histories.end() ; it++ )
      {
         if ( it->first <= ith )
         {
            xi = it->second;
            mean_till_ith += xi;
            nonzero_till_ith++;
         }
      }

      mean_till_ith = mean_till_ith / ( ith+1 );
      mean_history [ i-1 ] = mean_till_ith;

      G4double sum_x2_till_ith = 0.0;
      G4double var_till_ith = 0.0;
      G4double vov_till_ith = 0.0;
      G4double shift_till_ith = 0.0;

      for ( it = nonzero_histories.begin() ; it != nonzero_histories.end() ; it++ )
      {
         if ( it->first <= ith )
         {
            xi = it->second;
            sum_x2_till_ith += xi * xi;
            var_till_ith += ( xi - mean_till_ith ) * ( xi - mean_till_ith );
            shift_till_ith += ( xi - mean_till_ith ) * ( xi - mean_till_ith ) * ( xi - mean_till_ith );
            vov_till_ith += ( xi - mean_till_ith ) * ( xi - mean_till_ith ) * ( xi - mean_till_ith ) * ( xi - mean_till_ith );
         }
      }

      var_till_ith += ( (ith+1) - nonzero_till_ith ) * mean_till_ith * mean_till_ith;

      vov_till_ith += ( (ith+1) - nonzero_till_ith ) * mean_till_ith * mean_till_ith * mean_till_ith * mean_till_ith;
      vov_till_ith = vov_till_ith / ( var_till_ith * var_till_ith ) - 1.0 / (ith+1);
      vov_history [ i-1 ] = vov_till_ith;

      var_till_ith = var_till_ith / ( ith+1 - 1 );
      var_history [ i-1 ] = var_till_ith;
      sd_history [ i-1 ] = std::sqrt( var_till_ith );
      r_history [ i-1 ] = std::sqrt( var_till_ith ) / mean_till_ith / std::sqrt ( 1.0*(ith+1) );

      fom_history [ i-1 ] = 1 / ( r_history [ i-1 ] * r_history [ i-1 ] ) / cpu_time [ ith ];

      shift_till_ith += ( (ith+1) - nonzero_till_ith ) * mean_till_ith * mean_till_ith * mean_till_ith * ( -1 );
      shift_till_ith = shift_till_ith / ( 2 * var_till_ith * (ith+1) );
      shift_history [ i-1 ] = shift_till_ith;

      e_history [ i-1 ] = 1.0*nonzero_till_ith / (ith+1);
      r2eff_history [ i-1 ] = ( 1 - e_history [ i-1 ] ) / ( e_history [ i-1 ] * (ith+1) );

      G4double sum_till_ith = mean_till_ith * (ith+1);
      r2int_history [ i-1 ] = ( sum_x2_till_ith ) / ( sum_till_ith * sum_till_ith ) - 1 / ( e_history [ i-1 ] * (ith+1) );

   }

}


void G4ConvergenceTester::ShowResult()
{
   calStat();

   G4cout << "EFFICIENCY = " << efficiency << G4endl;
   G4cout << "MEAN = " << mean << G4endl;
   G4cout << "VAR = " << var << G4endl;
   G4cout << "SD = " << sd << G4endl;
   G4cout << "R = " << r << G4endl;
   G4cout << "SHIFT = " << shift << G4endl;
   G4cout << "VOV = " << vov << G4endl;
   G4cout << "FOM = " << fom << G4endl;

   G4cout << "THE LARGEST SCORE = " << largest << " and it happened at the " << largest_score_happened << "th event" << G4endl;
   G4cout << "Affected Mean = " << mean_1 << " and its ratio to the original is " << mean_1/mean << G4endl;
   G4cout << "Affected VAR = " << var_1 << " and its ratio to the original is " << var_1/var << G4endl;
   G4cout << "Affected R = " << r_1 << " and its ratio to the original is " << r_1/r << G4endl;
   G4cout << "Affected SHIFT = " << shift_1 << " and its ratio to the original is " << shift_1/shift << G4endl;
   G4cout << "Affected FOM = " << fom_1 << " and its ratio to the original is " << fom_1/fom << G4endl;

   check_stat_history();

// check SLOPE and output the result
   if ( slope >= 3 )
   {
      noPass++;
      G4cout << "SLOPE is large enough" << G4endl;
   }
   else
   {
      G4cout << "SLOPE is not large enough" << G4endl;
   }

   G4cout << "This result passes " << noPass << " / " << noTotal << " Convergence Tests." << G4endl;
   G4cout << G4endl;

}

void G4ConvergenceTester::ShowHistory()
{
   G4cout << "i/" << noBinOfHistory << " till_ith mean var sd r vov fom shift e r2eff r2int" << G4endl;
   for ( G4int i = 1 ; i <= noBinOfHistory ; i++ )
   {
      G4cout << i << " "
             << history_grid [ i-1 ] << " "
             << mean_history [ i-1 ] << " "
             << var_history [ i-1 ] << " "
             << sd_history [ i-1 ] << " "
             << r_history [ i-1 ] << " "
             << vov_history [ i-1 ] << " "
             << fom_history [ i-1 ] << " "
             << shift_history [ i-1 ] << " "
             << e_history [ i-1 ] << " "
             << r2eff_history [ i-1 ] << " "
             << r2int_history [ i-1 ] << " "
             << G4endl;
   }
}

void G4ConvergenceTester::check_stat_history()
{

// 1 sigma rejection for the null hypothesis

   std::vector<G4double> first_ally;
   std::vector<G4double> second_ally;

// use the 2nd half of the histories
   G4int N = mean_history.size() / 2;
   G4int i;

   G4double pearson_r;
   G4double t;

   first_ally.resize( N );
   second_ally.resize( N );

// Mean

   for ( i = 0 ; i < N ; i++ )
   {
      first_ally [ i ] = history_grid [ N + i ];
      second_ally [ i ] = mean_history [ N + i ];
   }

   pearson_r = calc_Pearson_r ( N , first_ally , second_ally );
   t = pearson_r * std::sqrt ( ( N - 2 ) / ( 1 - pearson_r * pearson_r ) );

   if ( t < 0.429318 ) // Student's t for ( degrees of freedom = N-2 )
   {
      G4cout << "MEAN distribution is RANDOM" << G4endl;
      noPass++;
   }
   else
   {
      G4cout << "MEAN distribution is not RANDOM" << G4endl;
   }

// R

   for ( i = 0 ; i < N ; i++ )
   {
      first_ally [ i ] = 1.0 / std::sqrt ( G4double(history_grid [ N + i ]) );
      second_ally [ i ] = r_history [ N + i ];
   }

   pearson_r = calc_Pearson_r ( N , first_ally , second_ally );
   t = pearson_r * std::sqrt ( ( N - 2 ) / ( 1 - pearson_r * pearson_r ) );

   if ( t > 1.090546 )
   {
      G4cout << "r follows 1/std::sqrt(N)" << G4endl;
      noPass++;
   }
   else
   {
      G4cout << "r does not follow 1/std::sqrt(N)" << G4endl;
   }

   if ( is_monotonically_decrease( second_ally ) == true )
   {
      G4cout << "r is monotonically decreasing" << G4endl;
   }
   else
   {
      G4cout << "r is NOT monotonically decreasing" << G4endl;
   }

   if ( r_history.back() < 0.1 )
   {
      G4cout << "r is less than 0.1. r = " << r_history.back() << G4endl;
      noPass++;
   }
   else
   {
      G4cout << "r is NOT less than 0.1. r = " << r_history.back() << G4endl;
   }

// VOV
   for ( i = 0 ; i < N ; i++ )
   {
      first_ally [ i ] = 1.0 / history_grid [ N + i ];
      second_ally [ i ] = vov_history [ N + i ];
   }

   pearson_r = calc_Pearson_r ( N , first_ally , second_ally );
   t = pearson_r * std::sqrt ( ( N - 2 ) / ( 1 - pearson_r * pearson_r ) );

   if ( t > 1.090546 )
   {
      G4cout << "VOV follows 1/N" << G4endl;
      noPass++;
   }
   else
   {
      G4cout << "VOV does not follow 1/N" << G4endl;
   }

   if ( is_monotonically_decrease( second_ally ) == true )
   {
      G4cout << "VOV is monotonically decreasing" << G4endl;
   }
   else
   {
      G4cout << "VOV is NOT monotonically decreasing" << G4endl;
   }

// FOM

   for ( i = 0 ; i < N ; i++ )
   {
      first_ally [ i ] = history_grid [ N + i ];
      second_ally [ i ] = fom_history [ N + i ];
   }

   pearson_r = calc_Pearson_r ( N , first_ally , second_ally );
   t = pearson_r * std::sqrt ( ( N - 2 ) / ( 1 - pearson_r * pearson_r ) );

   if ( t < 0.429318 )
   {
      G4cout << "FOM distribution is RANDOM" << G4endl;
      noPass++;
   }
   else
   {
      G4cout << "FOM distribution is not RANDOM" << G4endl;
   }

}
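// ----------------------------------------------------------------------
// Counting the noPass increments, the noTotal = 8 checks reported by
// ShowResult() appear to be:
//   1. MEAN shows no trend (behaves randomly) over the 2nd half of the run
//   2. r decreases as 1/sqrt(N)
//   3. r decreases monotonically
//   4. final r < 0.1
//   5. VOV decreases as 1/N
//   6. VOV decreases monotonically
//   7. FOM shows no trend (i.e. FOM is roughly constant)
//   8. SLOPE of the largest-score PDF is >= 3
// ----------------------------------------------------------------------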


G4double G4ConvergenceTester::calc_Pearson_r ( G4int N , std::vector<G4double> first_ally , std::vector<G4double> second_ally )
{
   G4double first_mean = 0.0;
   G4double second_mean = 0.0;

   G4int i;
   for ( i = 0 ; i < N ; i++ )
   {
      first_mean += first_ally [ i ];
      second_mean += second_ally [ i ];
   }
   first_mean = first_mean / N;
   second_mean = second_mean / N;

   G4double a = 0.0;
   for ( i = 0 ; i < N ; i++ )
   {
      a += ( first_ally [ i ] - first_mean ) * ( second_ally [ i ] - second_mean );
   }

   G4double b1 = 0.0;
   G4double b2 = 0.0;
   for ( i = 0 ; i < N ; i++ )
   {
      b1 += ( first_ally [ i ] - first_mean ) * ( first_ally [ i ] - first_mean );
      b2 += ( second_ally [ i ] - second_mean ) * ( second_ally [ i ] - second_mean );
   }

   G4double rds = a / std::sqrt ( b1 * b2 );

   return rds;
}
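// ----------------------------------------------------------------------
// For reference, the statistic computed above and used in
// check_stat_history() is the sample Pearson correlation
//
//   r = sum_i ( x_i - xbar )( y_i - ybar )
//       / sqrt( sum_i ( x_i - xbar )^2 * sum_i ( y_i - ybar )^2 ),
//
// which is then converted to  t = r * sqrt( (N-2) / (1 - r^2) )  and
// compared against the fixed Student's t quantiles (0.429318, 1.090546)
// for N-2 degrees of freedom.
// ----------------------------------------------------------------------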


G4bool G4ConvergenceTester::is_monotonically_decrease ( std::vector<G4double> ally )
{

   std::vector<G4double>::iterator it;
   for ( it = ally.begin() ; it != ally.end() - 1 ; it++ )
   {
      if ( *it < *(it+1) ) return FALSE;
   }

   noPass++;
   return TRUE;
}


//void G4ConvergenceTester::calc_slope_fit ( std::vector<G4double> largest_scores )
void G4ConvergenceTester::calc_slope_fit ( std::vector<G4double> )
{

   // create the PDF bins
   G4double max = largest_scores.front();
   G4int last = int ( largest_scores.size() );
   G4double min = 0.0;
   if ( largest_scores.back() != 0 )
   {
      min = largest_scores.back();
   }
   else
   {
      min = largest_scores[ last-1 ];
      last = last - 1;
   }

   //G4cout << "largest " << max << G4endl;
   //G4cout << "last " << min << G4endl;

   if ( max*0.99 < min )
   {
      // the upper limit is assumed to have been reached
      slope = 10.0;
      return;
   }

   std::vector < G4double > pdf_grid;

   pdf_grid.resize( noBinOfPDF+1 ); // number of grid points = number of bins + 1
   pdf_grid[ 0 ] = max;
   pdf_grid[ noBinOfPDF ] = min;
   G4double log10_max = std::log10( max );
   G4double log10_min = std::log10( min );
   G4double log10_delta = log10_max - log10_min;
   for ( G4int i = 1 ; i < noBinOfPDF ; i++ )
   {
      pdf_grid[i] = std::pow ( 10.0 , log10_max - log10_delta/noBinOfPDF*i );
      //G4cout << "pdf i " << i << " " << pdf_grid[i] << G4endl;
   }

   std::vector < G4double > pdf;
   pdf.resize( noBinOfPDF );

   for ( G4int j=0 ; j < last ; j++ )
   {
      // i+1 must stay within pdf_grid, which has noBinOfPDF+1 entries
      for ( G4int i = 0 ; i < noBinOfPDF ; i++ )
      {
         if ( largest_scores[j] >= pdf_grid[i+1] )
         {
            pdf[i] += 1.0 / ( pdf_grid[i] - pdf_grid[i+1] ) / n;
            //G4cout << "pdf " << j << " " << i << " " << largest_scores[j] << " " << G4endl;
            break;
         }
      }
   }

   f_xi.resize( noBinOfPDF );
   f_yi.resize( noBinOfPDF );
   for ( G4int i = 0 ; i < noBinOfPDF ; i++ )
   {
      //G4cout << "pdf i " << i << " " << (pdf_grid[i]+pdf_grid[i+1])/2 << " " << pdf[i] << G4endl;
      f_xi[i] = ( pdf_grid[i] + pdf_grid[i+1] ) / 2;
      f_yi[i] = pdf[i];
   }

   // number of fit variables ( a and k )
   minimizer = new G4SimplexDownhill<G4ConvergenceTester> ( this , 2 );
   //G4double minimum = minimizer->GetMinimum();
   std::vector<G4double> mp = minimizer->GetMinimumPoint();
   G4double k = mp[1];

   //G4cout << "SLOPE " << 1/mp[1]+1 << G4endl;
   //G4cout << "SLOPE a " << mp[0] << G4endl;
   //G4cout << "SLOPE k " << mp[1] << G4endl;
   //G4cout << "SLOPE minimum " << minimizer->GetMinimum() << G4endl;

   slope = 1/mp[1]+1;
   if ( k < 1.0/9 ) // Pareto distribution with scale "a" and shape "k"; k < 1/9 means slope = 1/k + 1 > 10
   {
      slope = 10;
   }
   if ( slope > 10 )
   {
      slope = 10;
   }
}
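// ----------------------------------------------------------------------
// In other words (following the MCNP prescription cited in the header):
// the top ~5% of the non-zero scores are histogrammed on a log-spaced
// grid between the largest and the smallest retained score, and the
// resulting empirical PDF is fitted with a generalized Pareto density.
// The reported SLOPE = 1/k + 1 is capped at 10; SLOPE >= 3 suggests the
// scoring distribution falls off fast enough for the variance to exist.
// ----------------------------------------------------------------------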


G4double G4ConvergenceTester::slope_fitting_function ( std::vector< G4double > x )
{

   G4double a = x[0];
   G4double k = x[1];

   if ( a <= 0 )
   {
      return 3.402823466e+38; // FLT_MAX
   }
   if ( k == 0 )
   {
      return 3.402823466e+38; // FLT_MAX
   }

// f_xi and f_yi are filled in calc_slope_fit()

   G4double y = 0.0;
   G4int i;
   for ( i = 0 ; i < int ( f_yi.size() ) ; i++ )
   {
      //if ( 1/a * ( 1 + k * f_xi [ i ] / a ) < 0 )
      if ( ( 1 + k * f_xi [ i ] / a ) < 0 )
      {
         y += 3.402823466e+38; // FLT_MAX
      }
      else
      {
         y += ( f_yi [ i ] - 1/a*std::pow ( 1 + k * f_xi [ i ] / a , - 1/k - 1 ) )
            * ( f_yi [ i ] - 1/a*std::pow ( 1 + k * f_xi [ i ] / a , - 1/k - 1 ) );
      }
   }
// G4cout << "y = " << y << G4endl;

   return y;
}
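// ----------------------------------------------------------------------
// For reference, the objective minimized above by G4SimplexDownhill is a
// least-squares distance between the binned PDF of the largest scores and
// a generalized Pareto density,
//
//   f(x) = (1/a) * ( 1 + k*x/a )^( -1/k - 1 ),   a > 0, k != 0,
//
//   y(a,k) = sum_i ( f_yi[i] - f( f_xi[i] ) )^2,
//
// with a huge penalty (FLT_MAX) wherever the density would be undefined.
// calc_slope_fit() then reports SLOPE = 1/k + 1 from the fitted k.
// ----------------------------------------------------------------------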