-
Notifications
You must be signed in to change notification settings - Fork 1
/
relationships-put-to-the-test.html
856 lines (814 loc) · 72.5 KB
/
relationships-put-to-the-test.html
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
<!DOCTYPE html>
<html lang="en" xml:lang="en">
<head>
<meta charset="utf-8" />
<meta http-equiv="X-UA-Compatible" content="IE=edge" />
<title>Chapter 11 Relationships Put to the Test | Probabilistic Reasoning: from an elementary point of view</title>
<meta name="description" content="Learning is inference." />
<meta name="generator" content="bookdown 0.24 and GitBook 2.6.7" />
<meta property="og:title" content="Chapter 11 Relationships Put to the Test | Probabilistic Reasoning: from an elementary point of view" />
<meta property="og:type" content="book" />
<meta property="og:description" content="Learning is inference." />
<meta name="twitter:card" content="summary" />
<meta name="twitter:title" content="Chapter 11 Relationships Put to the Test | Probabilistic Reasoning: from an elementary point of view" />
<meta name="twitter:description" content="Learning is inference." />
<meta name="author" content="William G. Foote" />
<meta name="date" content="2021-11-27" />
<meta name="viewport" content="width=device-width, initial-scale=1" />
<meta name="apple-mobile-web-app-capable" content="yes" />
<meta name="apple-mobile-web-app-status-bar-style" content="black" />
<link rel="prev" href="part-four-the-test-of-a-relationship.html"/>
<link rel="next" href="the-journey-continues.html"/>
<script src="libs/header-attrs-2.10/header-attrs.js"></script>
<script src="libs/jquery-3.6.0/jquery-3.6.0.min.js"></script>
<script src="https://cdn.jsdelivr.net/npm/fuse.js@6.4.6/dist/fuse.min.js"></script>
<link href="libs/gitbook-2.6.7/css/style.css" rel="stylesheet" />
<link href="libs/gitbook-2.6.7/css/plugin-table.css" rel="stylesheet" />
<link href="libs/gitbook-2.6.7/css/plugin-bookdown.css" rel="stylesheet" />
<link href="libs/gitbook-2.6.7/css/plugin-highlight.css" rel="stylesheet" />
<link href="libs/gitbook-2.6.7/css/plugin-search.css" rel="stylesheet" />
<link href="libs/gitbook-2.6.7/css/plugin-fontsettings.css" rel="stylesheet" />
<link href="libs/gitbook-2.6.7/css/plugin-clipboard.css" rel="stylesheet" />
<link href="libs/anchor-sections-1.0.1/anchor-sections.css" rel="stylesheet" />
<script src="libs/anchor-sections-1.0.1/anchor-sections.js"></script>
<link rel="stylesheet" href="style.css" type="text/css" />
</head>
<body>
<div class="book without-animation with-summary font-size-2 font-family-1" data-basepath=".">
<div class="book-summary">
<nav role="navigation">
<ul class="summary">
<li><a href="./">Probabilistic Reasoning</a></li>
<li class="divider"></li>
<li class="chapter" data-level="" data-path="index.html"><a href="index.html"><i class="fa fa-check"></i>Prologomena for a Future Statistics</a>
<ul>
<li class="chapter" data-level="" data-path="index.html"><a href="index.html#why-this-book"><i class="fa fa-check"></i>Why this book</a></li>
<li class="chapter" data-level="" data-path="index.html"><a href="index.html#premises"><i class="fa fa-check"></i>Premises</a></li>
<li class="chapter" data-level="" data-path="index.html"><a href="index.html#so-many-questions-and-too-little-time"><i class="fa fa-check"></i>So many questions and too little time</a></li>
<li class="chapter" data-level="" data-path="index.html"><a href="index.html#dont-we-know-everything-we-need-to-know"><i class="fa fa-check"></i>Don’t we know everything we need to know?</a></li>
<li class="chapter" data-level="" data-path="index.html"><a href="index.html#what-we-desire"><i class="fa fa-check"></i>What we desire</a></li>
<li class="chapter" data-level="" data-path="index.html"><a href="index.html#frequentist-or-probabilistic"><i class="fa fa-check"></i>Frequentist or probabilistic?</a></li>
<li class="chapter" data-level="" data-path="index.html"><a href="index.html#a-work-in-progress"><i class="fa fa-check"></i>A work in progress</a></li>
</ul></li>
<li class="chapter" data-level="" data-path="part-one-the-basics.html"><a href="part-one-the-basics.html"><i class="fa fa-check"></i>Part One – The Basics</a></li>
<li class="chapter" data-level="1" data-path="counting-the-ways.html"><a href="counting-the-ways.html"><i class="fa fa-check"></i><b>1</b> Counting the Ways</a>
<ul>
<li class="chapter" data-level="1.1" data-path="counting-the-ways.html"><a href="counting-the-ways.html#plausibility-probability-and-information"><i class="fa fa-check"></i><b>1.1</b> Plausibility, probability and information</a></li>
<li class="chapter" data-level="1.2" data-path="counting-the-ways.html"><a href="counting-the-ways.html#some-surprise"><i class="fa fa-check"></i><b>1.2</b> Some Surprise</a></li>
<li class="chapter" data-level="1.3" data-path="counting-the-ways.html"><a href="counting-the-ways.html#how-many-ways"><i class="fa fa-check"></i><b>1.3</b> How many ways?</a></li>
<li class="chapter" data-level="1.4" data-path="counting-the-ways.html"><a href="counting-the-ways.html#back-to-data"><i class="fa fa-check"></i><b>1.4</b> Back to data</a></li>
<li class="chapter" data-level="1.5" data-path="counting-the-ways.html"><a href="counting-the-ways.html#checking-our-grip-on-reality"><i class="fa fa-check"></i><b>1.5</b> Checking our grip on reality</a></li>
</ul></li>
<li class="chapter" data-level="2" data-path="probability-for-real-people.html"><a href="probability-for-real-people.html"><i class="fa fa-check"></i><b>2</b> Probability for Real People</a>
<ul>
<li class="chapter" data-level="2.1" data-path="probability-for-real-people.html"><a href="probability-for-real-people.html#can-we-rationally-reason"><i class="fa fa-check"></i><b>2.1</b> Can we rationally reason?</a>
<ul>
<li class="chapter" data-level="2.1.1" data-path="probability-for-real-people.html"><a href="probability-for-real-people.html#priors-what-we-think-might-happen"><i class="fa fa-check"></i><b>2.1.1</b> Priors: what we think might happen</a></li>
<li class="chapter" data-level="2.1.2" data-path="probability-for-real-people.html"><a href="probability-for-real-people.html#likelihoods-thinking-about-the-data"><i class="fa fa-check"></i><b>2.1.2</b> Likelihoods: thinking about the data</a></li>
<li class="chapter" data-level="2.1.3" data-path="probability-for-real-people.html"><a href="probability-for-real-people.html#altogether-now"><i class="fa fa-check"></i><b>2.1.3</b> Altogether now</a></li>
<li class="chapter" data-level="2.1.4" data-path="probability-for-real-people.html"><a href="probability-for-real-people.html#updating-beliefs"><i class="fa fa-check"></i><b>2.1.4</b> Updating beliefs</a></li>
</ul></li>
<li class="chapter" data-level="2.2" data-path="probability-for-real-people.html"><a href="probability-for-real-people.html#whats-next"><i class="fa fa-check"></i><b>2.2</b> What’s next?</a></li>
<li class="chapter" data-level="2.3" data-path="probability-for-real-people.html"><a href="probability-for-real-people.html#try-this-out-if-this-is-reasonable"><i class="fa fa-check"></i><b>2.3</b> Try this out, if this is reasonable</a></li>
<li class="chapter" data-level="2.4" data-path="probability-for-real-people.html"><a href="probability-for-real-people.html#endnotes"><i class="fa fa-check"></i><b>2.4</b> Endnotes</a></li>
</ul></li>
<li class="chapter" data-level="" data-path="part-two-the-fantastic-four.html"><a href="part-two-the-fantastic-four.html"><i class="fa fa-check"></i>Part Two – The Fantastic Four</a></li>
<li class="chapter" data-level="3" data-path="algorithmics-1-counting-made-easy.html"><a href="algorithmics-1-counting-made-easy.html"><i class="fa fa-check"></i><b>3</b> Algorithmics 1: counting made easy</a>
<ul>
<li class="chapter" data-level="3.1" data-path="algorithmics-1-counting-made-easy.html"><a href="algorithmics-1-counting-made-easy.html#whats-an-algorithm"><i class="fa fa-check"></i><b>3.1</b> What’s an algorithm?</a></li>
<li class="chapter" data-level="3.2" data-path="algorithmics-1-counting-made-easy.html"><a href="algorithmics-1-counting-made-easy.html#our-first-job-unobserved-hypotheses"><i class="fa fa-check"></i><b>3.2</b> Our first job: unobserved hypotheses</a></li>
<li class="chapter" data-level="3.3" data-path="algorithmics-1-counting-made-easy.html"><a href="algorithmics-1-counting-made-easy.html#possibilities-abound"><i class="fa fa-check"></i><b>3.3</b> Possibilities abound</a></li>
<li class="chapter" data-level="3.4" data-path="algorithmics-1-counting-made-easy.html"><a href="algorithmics-1-counting-made-easy.html#observed-data"><i class="fa fa-check"></i><b>3.4</b> Observed data</a></li>
<li class="chapter" data-level="3.5" data-path="algorithmics-1-counting-made-easy.html"><a href="algorithmics-1-counting-made-easy.html#is-anything-really-plausible"><i class="fa fa-check"></i><b>3.5</b> Is anything really plausible?</a></li>
<li class="chapter" data-level="3.6" data-path="algorithmics-1-counting-made-easy.html"><a href="algorithmics-1-counting-made-easy.html#interpretation"><i class="fa fa-check"></i><b>3.6</b> Interpretation</a></li>
<li class="chapter" data-level="3.7" data-path="algorithmics-1-counting-made-easy.html"><a href="algorithmics-1-counting-made-easy.html#locales"><i class="fa fa-check"></i><b>3.7</b> 10 locales?</a></li>
<li class="chapter" data-level="3.8" data-path="algorithmics-1-counting-made-easy.html"><a href="algorithmics-1-counting-made-easy.html#next"><i class="fa fa-check"></i><b>3.8</b> Next</a></li>
<li class="chapter" data-level="3.9" data-path="algorithmics-1-counting-made-easy.html"><a href="algorithmics-1-counting-made-easy.html#references-and-endnotes"><i class="fa fa-check"></i><b>3.9</b> References and endnotes</a></li>
</ul></li>
<li class="chapter" data-level="4" data-path="algorithmics-2-binomial-ups-and-downs.html"><a href="algorithmics-2-binomial-ups-and-downs.html"><i class="fa fa-check"></i><b>4</b> Algorithmics 2: binomial ups and downs</a>
<ul>
<li class="chapter" data-level="4.1" data-path="algorithmics-2-binomial-ups-and-downs.html"><a href="algorithmics-2-binomial-ups-and-downs.html#anatomy-of-an-algorithm"><i class="fa fa-check"></i><b>4.1</b> Anatomy of an algorithm</a></li>
<li class="chapter" data-level="4.2" data-path="algorithmics-2-binomial-ups-and-downs.html"><a href="algorithmics-2-binomial-ups-and-downs.html#ups-and-downs"><i class="fa fa-check"></i><b>4.2</b> Ups and downs</a></li>
<li class="chapter" data-level="4.3" data-path="algorithmics-2-binomial-ups-and-downs.html"><a href="algorithmics-2-binomial-ups-and-downs.html#dispensing-with-the-bag-of-beans"><i class="fa fa-check"></i><b>4.3</b> Dispensing with the bag of beans</a></li>
<li class="chapter" data-level="4.4" data-path="algorithmics-2-binomial-ups-and-downs.html"><a href="algorithmics-2-binomial-ups-and-downs.html#great-expectations"><i class="fa fa-check"></i><b>4.4</b> Great expectations</a></li>
<li class="chapter" data-level="4.5" data-path="algorithmics-2-binomial-ups-and-downs.html"><a href="algorithmics-2-binomial-ups-and-downs.html#then-there-were-eleven"><i class="fa fa-check"></i><b>4.5</b> Then there were eleven</a></li>
<li class="chapter" data-level="4.6" data-path="algorithmics-2-binomial-ups-and-downs.html"><a href="algorithmics-2-binomial-ups-and-downs.html#references-and-endnotes-1"><i class="fa fa-check"></i><b>4.6</b> References and endnotes</a></li>
</ul></li>
<li class="chapter" data-level="5" data-path="algorithmics-3-playing-musical-raptors.html"><a href="algorithmics-3-playing-musical-raptors.html"><i class="fa fa-check"></i><b>5</b> Algorithmics 3: playing musical raptors</a>
<ul>
<li class="chapter" data-level="5.1" data-path="algorithmics-3-playing-musical-raptors.html"><a href="algorithmics-3-playing-musical-raptors.html#is-there-more-to-life-than-binary"><i class="fa fa-check"></i><b>5.1</b> Is there more to life than binary?</a></li>
<li class="chapter" data-level="5.2" data-path="algorithmics-3-playing-musical-raptors.html"><a href="algorithmics-3-playing-musical-raptors.html#exploring-what-we-do-know"><i class="fa fa-check"></i><b>5.2</b> Exploring what we do know</a>
<ul>
<li class="chapter" data-level="5.2.1" data-path="algorithmics-3-playing-musical-raptors.html"><a href="algorithmics-3-playing-musical-raptors.html#summarize-the-data"><i class="fa fa-check"></i><b>5.2.1</b> Summarize the data</a></li>
</ul></li>
<li class="chapter" data-level="5.3" data-path="algorithmics-3-playing-musical-raptors.html"><a href="algorithmics-3-playing-musical-raptors.html#whence-the-binomial-generates-the-poisson"><i class="fa fa-check"></i><b>5.3</b> Whence the binomial generates the Poisson</a></li>
<li class="chapter" data-level="5.4" data-path="algorithmics-3-playing-musical-raptors.html"><a href="algorithmics-3-playing-musical-raptors.html#approximating-poisson"><i class="fa fa-check"></i><b>5.4</b> Approximating Poisson</a></li>
<li class="chapter" data-level="5.5" data-path="algorithmics-3-playing-musical-raptors.html"><a href="algorithmics-3-playing-musical-raptors.html#zooming-in-for-a-closer-look"><i class="fa fa-check"></i><b>5.5</b> Zooming in for a closer look</a></li>
<li class="chapter" data-level="5.6" data-path="algorithmics-3-playing-musical-raptors.html"><a href="algorithmics-3-playing-musical-raptors.html#probability-intervals"><i class="fa fa-check"></i><b>5.6</b> Probability intervals</a></li>
<li class="chapter" data-level="5.7" data-path="algorithmics-3-playing-musical-raptors.html"><a href="algorithmics-3-playing-musical-raptors.html#references-and-endnotes-2"><i class="fa fa-check"></i><b>5.7</b> References and endnotes</a></li>
</ul></li>
<li class="chapter" data-level="6" data-path="algorithmics-4-gaussian-blues.html"><a href="algorithmics-4-gaussian-blues.html"><i class="fa fa-check"></i><b>6</b> Algorithmics 4: Gaussian blues</a>
<ul>
<li class="chapter" data-level="6.1" data-path="algorithmics-4-gaussian-blues.html"><a href="algorithmics-4-gaussian-blues.html#while-we-wait-for-the-other-shoe-to-drop"><i class="fa fa-check"></i><b>6.1</b> While we wait for the other shoe to drop</a></li>
<li class="chapter" data-level="6.2" data-path="algorithmics-4-gaussian-blues.html"><a href="algorithmics-4-gaussian-blues.html#is-there-more-to-life-than-just-counting"><i class="fa fa-check"></i><b>6.2</b> Is there more to life than just counting?</a></li>
<li class="chapter" data-level="6.3" data-path="algorithmics-4-gaussian-blues.html"><a href="algorithmics-4-gaussian-blues.html#gauss-gauss-where-is-gauss"><i class="fa fa-check"></i><b>6.3</b> Gauss, Gauss, where is Gauss?</a>
<ul>
<li class="chapter" data-level="6.3.1" data-path="algorithmics-4-gaussian-blues.html"><a href="algorithmics-4-gaussian-blues.html#full-time-equivalent"><i class="fa fa-check"></i><b>6.3.1</b> Full time equivalent</a></li>
<li class="chapter" data-level="6.3.2" data-path="algorithmics-4-gaussian-blues.html"><a href="algorithmics-4-gaussian-blues.html#compound-growth"><i class="fa fa-check"></i><b>6.3.2</b> Compound growth</a></li>
<li class="chapter" data-level="6.3.3" data-path="algorithmics-4-gaussian-blues.html"><a href="algorithmics-4-gaussian-blues.html#log-products"><i class="fa fa-check"></i><b>6.3.3</b> Log products</a></li>
</ul></li>
<li class="chapter" data-level="6.4" data-path="algorithmics-4-gaussian-blues.html"><a href="algorithmics-4-gaussian-blues.html#assume-and-simplify"><i class="fa fa-check"></i><b>6.4</b> Assume and simplify</a></li>
<li class="chapter" data-level="6.5" data-path="algorithmics-4-gaussian-blues.html"><a href="algorithmics-4-gaussian-blues.html#how-do-we-get-there"><i class="fa fa-check"></i><b>6.5</b> How do we get there?</a></li>
<li class="chapter" data-level="6.6" data-path="algorithmics-4-gaussian-blues.html"><a href="algorithmics-4-gaussian-blues.html#grid-lock"><i class="fa fa-check"></i><b>6.6</b> Grid lock</a></li>
<li class="chapter" data-level="6.7" data-path="algorithmics-4-gaussian-blues.html"><a href="algorithmics-4-gaussian-blues.html#onward-we-march"><i class="fa fa-check"></i><b>6.7</b> Onward we march</a></li>
<li class="chapter" data-level="6.8" data-path="algorithmics-4-gaussian-blues.html"><a href="algorithmics-4-gaussian-blues.html#what-does-it-all-mean"><i class="fa fa-check"></i><b>6.8</b> What does it all mean?</a></li>
<li class="chapter" data-level="6.9" data-path="algorithmics-4-gaussian-blues.html"><a href="algorithmics-4-gaussian-blues.html#a-provisional-finding"><i class="fa fa-check"></i><b>6.9</b> A provisional finding</a></li>
<li class="chapter" data-level="6.10" data-path="algorithmics-4-gaussian-blues.html"><a href="algorithmics-4-gaussian-blues.html#just-one-more-thing"><i class="fa fa-check"></i><b>6.10</b> Just one more thing</a>
<ul>
<li class="chapter" data-level="6.10.1" data-path="algorithmics-4-gaussian-blues.html"><a href="algorithmics-4-gaussian-blues.html#one-way"><i class="fa fa-check"></i><b>6.10.1</b> One way</a></li>
<li class="chapter" data-level="6.10.2" data-path="algorithmics-4-gaussian-blues.html"><a href="algorithmics-4-gaussian-blues.html#or-the-other"><i class="fa fa-check"></i><b>6.10.2</b> Or the other</a></li>
</ul></li>
<li class="chapter" data-level="6.11" data-path="algorithmics-4-gaussian-blues.html"><a href="algorithmics-4-gaussian-blues.html#references-and-endnotes-3"><i class="fa fa-check"></i><b>6.11</b> References and endnotes</a></li>
</ul></li>
<li class="chapter" data-level="" data-path="part-three-rubber-meets-the-road.html"><a href="part-three-rubber-meets-the-road.html"><i class="fa fa-check"></i>Part Three – Rubber meets the road</a></li>
<li class="chapter" data-level="7" data-path="gausss-robots-again.html"><a href="gausss-robots-again.html"><i class="fa fa-check"></i><b>7</b> Gauss’s robots again</a>
<ul>
<li class="chapter" data-level="7.1" data-path="gausss-robots-again.html"><a href="gausss-robots-again.html#an-auspicious-result"><i class="fa fa-check"></i><b>7.1</b> An auspicious result</a></li>
<li class="chapter" data-level="7.2" data-path="gausss-robots-again.html"><a href="gausss-robots-again.html#tale-of-two-populations"><i class="fa fa-check"></i><b>7.2</b> Tale of two populations</a></li>
<li class="chapter" data-level="7.3" data-path="gausss-robots-again.html"><a href="gausss-robots-again.html#education-is-the-key"><i class="fa fa-check"></i><b>7.3</b> Education is the key</a></li>
<li class="chapter" data-level="7.4" data-path="gausss-robots-again.html"><a href="gausss-robots-again.html#sample-until-we-drop"><i class="fa fa-check"></i><b>7.4</b> Sample until we drop</a></li>
<li class="chapter" data-level="7.5" data-path="gausss-robots-again.html"><a href="gausss-robots-again.html#results-results-we-want-results"><i class="fa fa-check"></i><b>7.5</b> Results, results, we want results!</a></li>
<li class="chapter" data-level="7.6" data-path="gausss-robots-again.html"><a href="gausss-robots-again.html#yet-another-rocky-road-we-have-traveled"><i class="fa fa-check"></i><b>7.6</b> Yet another rocky road we have traveled</a></li>
</ul></li>
<li class="chapter" data-level="8" data-path="gausss-robots-go-rogue.html"><a href="gausss-robots-go-rogue.html"><i class="fa fa-check"></i><b>8</b> Gauss’s robots go rogue</a>
<ul>
<li class="chapter" data-level="8.1" data-path="gausss-robots-go-rogue.html"><a href="gausss-robots-go-rogue.html#spreadsheets-really"><i class="fa fa-check"></i><b>8.1</b> Spreadsheets? Really?</a></li>
<li class="chapter" data-level="8.2" data-path="gausss-robots-go-rogue.html"><a href="gausss-robots-go-rogue.html#an-auspicious-result-again"><i class="fa fa-check"></i><b>8.2</b> An auspicious result again?</a></li>
<li class="chapter" data-level="8.3" data-path="gausss-robots-go-rogue.html"><a href="gausss-robots-go-rogue.html#the-most-uninformative-distribution"><i class="fa fa-check"></i><b>8.3</b> The most uninformative distribution</a></li>
<li class="chapter" data-level="8.4" data-path="gausss-robots-go-rogue.html"><a href="gausss-robots-go-rogue.html#simulate-until-morale-improves"><i class="fa fa-check"></i><b>8.4</b> Simulate until morale improves!</a></li>
<li class="chapter" data-level="8.5" data-path="gausss-robots-go-rogue.html"><a href="gausss-robots-go-rogue.html#is-it-true-that-gauss-is-in-the-house-again"><i class="fa fa-check"></i><b>8.5</b> Is it true that Gauss is in the house again?</a></li>
<li class="chapter" data-level="8.6" data-path="gausss-robots-go-rogue.html"><a href="gausss-robots-go-rogue.html#and-again"><i class="fa fa-check"></i><b>8.6</b> And again?</a></li>
<li class="chapter" data-level="8.7" data-path="gausss-robots-go-rogue.html"><a href="gausss-robots-go-rogue.html#the-association"><i class="fa fa-check"></i><b>8.7</b> The Association</a></li>
<li class="chapter" data-level="8.8" data-path="gausss-robots-go-rogue.html"><a href="gausss-robots-go-rogue.html#a-tale-of-coir"><i class="fa fa-check"></i><b>8.8</b> A tale of coir</a>
<ul>
<li class="chapter" data-level="8.8.1" data-path="gausss-robots-go-rogue.html"><a href="gausss-robots-go-rogue.html#business-situation"><i class="fa fa-check"></i><b>8.8.1</b> Business Situation</a></li>
<li class="chapter" data-level="8.8.2" data-path="gausss-robots-go-rogue.html"><a href="gausss-robots-go-rogue.html#business-questions"><i class="fa fa-check"></i><b>8.8.2</b> Business Questions</a></li>
<li class="chapter" data-level="8.8.3" data-path="gausss-robots-go-rogue.html"><a href="gausss-robots-go-rogue.html#data"><i class="fa fa-check"></i><b>8.8.3</b> Data</a></li>
<li class="chapter" data-level="8.8.4" data-path="gausss-robots-go-rogue.html"><a href="gausss-robots-go-rogue.html#analysis"><i class="fa fa-check"></i><b>8.8.4</b> Analysis</a></li>
<li class="chapter" data-level="8.8.5" data-path="gausss-robots-go-rogue.html"><a href="gausss-robots-go-rogue.html#results"><i class="fa fa-check"></i><b>8.8.5</b> Results</a></li>
</ul></li>
<li class="chapter" data-level="8.9" data-path="gausss-robots-go-rogue.html"><a href="gausss-robots-go-rogue.html#endnotes-1"><i class="fa fa-check"></i><b>8.9</b> Endnotes</a></li>
</ul></li>
<li class="chapter" data-level="9" data-path="credible-interval-training.html"><a href="credible-interval-training.html"><i class="fa fa-check"></i><b>9</b> Credible interval training?</a>
<ul>
<li class="chapter" data-level="9.1" data-path="credible-interval-training.html"><a href="credible-interval-training.html#imagine-this"><i class="fa fa-check"></i><b>9.1</b> Imagine this…</a></li>
<li class="chapter" data-level="9.2" data-path="credible-interval-training.html"><a href="credible-interval-training.html#try-this-on-for-size"><i class="fa fa-check"></i><b>9.2</b> Try this on for size</a></li>
<li class="chapter" data-level="9.3" data-path="credible-interval-training.html"><a href="credible-interval-training.html#what-about-the-sampled-standard-deviation"><i class="fa fa-check"></i><b>9.3</b> What about the sampled standard deviation?</a>
<ul>
<li class="chapter" data-level="9.3.1" data-path="credible-interval-training.html"><a href="credible-interval-training.html#heres-the-promised-derivation"><i class="fa fa-check"></i><b>9.3.1</b> Here’s the promised derivation</a></li>
</ul></li>
<li class="chapter" data-level="9.4" data-path="credible-interval-training.html"><a href="credible-interval-training.html#probability-intervals-1-known-population-standard-deviation"><i class="fa fa-check"></i><b>9.4</b> Probability intervals 1: known population standard deviation</a></li>
<li class="chapter" data-level="9.5" data-path="credible-interval-training.html"><a href="credible-interval-training.html#our-first-procedure-emerges"><i class="fa fa-check"></i><b>9.5</b> Our first procedure emerges</a></li>
<li class="chapter" data-level="9.6" data-path="credible-interval-training.html"><a href="credible-interval-training.html#probability-intervals-2-on-to-the-unknown-standard-deviation"><i class="fa fa-check"></i><b>9.6</b> Probability intervals 2: on to the unknown standard deviation</a>
<ul>
<li class="chapter" data-level="9.6.1" data-path="credible-interval-training.html"><a href="credible-interval-training.html#by-the-way-who-is-student"><i class="fa fa-check"></i><b>9.6.1</b> By the way, who is Student?</a></li>
</ul></li>
<li class="chapter" data-level="9.7" data-path="credible-interval-training.html"><a href="credible-interval-training.html#our-second-procedure"><i class="fa fa-check"></i><b>9.7</b> Our second procedure</a></li>
<li class="chapter" data-level="9.8" data-path="credible-interval-training.html"><a href="credible-interval-training.html#exercises"><i class="fa fa-check"></i><b>9.8</b> Exercises</a></li>
</ul></li>
<li class="chapter" data-level="10" data-path="hypothetically-speaking.html"><a href="hypothetically-speaking.html"><i class="fa fa-check"></i><b>10</b> Hypothetically Speaking</a>
<ul>
<li class="chapter" data-level="10.1" data-path="hypothetically-speaking.html"><a href="hypothetically-speaking.html#imagine-this-1"><i class="fa fa-check"></i><b>10.1</b> Imagine this…</a>
<ul>
<li class="chapter" data-level="10.1.1" data-path="hypothetically-speaking.html"><a href="hypothetically-speaking.html#for-those-who-really-want-to-or-even-need-to"><i class="fa fa-check"></i><b>10.1.1</b> For those who really want to, or even need to</a></li>
<li class="chapter" data-level="10.1.2" data-path="hypothetically-speaking.html"><a href="hypothetically-speaking.html#finally-an-excel-screenshot"><i class="fa fa-check"></i><b>10.1.2</b> Finally an excel screenshot</a></li>
</ul></li>
<li class="chapter" data-level="10.2" data-path="hypothetically-speaking.html"><a href="hypothetically-speaking.html#can-we-be-wrong"><i class="fa fa-check"></i><b>10.2</b> Can we be wrong?</a></li>
<li class="chapter" data-level="10.3" data-path="hypothetically-speaking.html"><a href="hypothetically-speaking.html#yet-another-way"><i class="fa fa-check"></i><b>10.3</b> Yet another way</a>
<ul>
<li class="chapter" data-level="10.3.1" data-path="hypothetically-speaking.html"><a href="hypothetically-speaking.html#population-standard-deviation-known"><i class="fa fa-check"></i><b>10.3.1</b> Population standard deviation known</a></li>
<li class="chapter" data-level="10.3.2" data-path="hypothetically-speaking.html"><a href="hypothetically-speaking.html#control-is-probability"><i class="fa fa-check"></i><b>10.3.2</b> Control is probability</a></li>
<li class="chapter" data-level="10.3.3" data-path="hypothetically-speaking.html"><a href="hypothetically-speaking.html#on-to-the-unknown"><i class="fa fa-check"></i><b>10.3.3</b> On to the unknown</a></li>
<li class="chapter" data-level="10.3.4" data-path="hypothetically-speaking.html"><a href="hypothetically-speaking.html#on-with-our-story"><i class="fa fa-check"></i><b>10.3.4</b> On with our story…</a></li>
</ul></li>
<li class="chapter" data-level="10.4" data-path="hypothetically-speaking.html"><a href="hypothetically-speaking.html#exercises-1"><i class="fa fa-check"></i><b>10.4</b> Exercises</a></li>
</ul></li>
<li class="chapter" data-level="" data-path="part-four-the-test-of-a-relationship.html"><a href="part-four-the-test-of-a-relationship.html"><i class="fa fa-check"></i>Part Four – The Test of a Relationship</a></li>
<li class="chapter" data-level="11" data-path="relationships-put-to-the-test.html"><a href="relationships-put-to-the-test.html"><i class="fa fa-check"></i><b>11</b> Relationships Put to the Test</a>
<ul>
<li class="chapter" data-level="11.1" data-path="relationships-put-to-the-test.html"><a href="relationships-put-to-the-test.html#its-not-so-hard-to-imagine-this"><i class="fa fa-check"></i><b>11.1</b> It’s not so hard to imagine this…</a></li>
<li class="chapter" data-level="11.2" data-path="relationships-put-to-the-test.html"><a href="relationships-put-to-the-test.html#the-maths-the-maths"><i class="fa fa-check"></i><b>11.2</b> The maths! The maths!</a>
<ul>
<li class="chapter" data-level="11.2.1" data-path="relationships-put-to-the-test.html"><a href="relationships-put-to-the-test.html#what-did-we-all-expect"><i class="fa fa-check"></i><b>11.2.1</b> What did we all expect?</a></li>
<li class="chapter" data-level="11.2.2" data-path="relationships-put-to-the-test.html"><a href="relationships-put-to-the-test.html#walking-the-straight-line"><i class="fa fa-check"></i><b>11.2.2</b> Walking the straight line</a></li>
<li class="chapter" data-level="11.2.3" data-path="relationships-put-to-the-test.html"><a href="relationships-put-to-the-test.html#a-short-variance-diatribe"><i class="fa fa-check"></i><b>11.2.3</b> A short variance diatribe</a></li>
</ul></li>
<li class="chapter" data-level="11.3" data-path="relationships-put-to-the-test.html"><a href="relationships-put-to-the-test.html#does-education-matter"><i class="fa fa-check"></i><b>11.3</b> Does education matter?</a></li>
<li class="chapter" data-level="11.4" data-path="relationships-put-to-the-test.html"><a href="relationships-put-to-the-test.html#back-to-the-business-at-hand"><i class="fa fa-check"></i><b>11.4</b> Back to the business at hand</a></li>
<li class="chapter" data-level="11.5" data-path="relationships-put-to-the-test.html"><a href="relationships-put-to-the-test.html#does-it-really-matter"><i class="fa fa-check"></i><b>11.5</b> Does it really matter?</a></li>
<li class="chapter" data-level="11.6" data-path="relationships-put-to-the-test.html"><a href="relationships-put-to-the-test.html#references-and-endnotes-4"><i class="fa fa-check"></i><b>11.6</b> References and endnotes</a></li>
</ul></li>
<li class="chapter" data-level="12" data-path="the-journey-continues.html"><a href="the-journey-continues.html"><i class="fa fa-check"></i><b>12</b> The journey continues</a>
<ul>
<li class="chapter" data-level="12.1" data-path="the-journey-continues.html"><a href="the-journey-continues.html#backing-up"><i class="fa fa-check"></i><b>12.1</b> Backing up</a></li>
<li class="chapter" data-level="12.2" data-path="the-journey-continues.html"><a href="the-journey-continues.html#fences-and-neighbors"><i class="fa fa-check"></i><b>12.2</b> Fences and neighbors</a>
<ul>
<li class="chapter" data-level="12.2.1" data-path="the-journey-continues.html"><a href="the-journey-continues.html#tukeys-fences."><i class="fa fa-check"></i><b>12.2.1</b> Tukey’s fences.</a></li>
<li class="chapter" data-level="12.2.2" data-path="the-journey-continues.html"><a href="the-journey-continues.html#credibility-intervals."><i class="fa fa-check"></i><b>12.2.2</b> Credibility intervals.</a></li>
</ul></li>
<li class="chapter" data-level="12.3" data-path="the-journey-continues.html"><a href="the-journey-continues.html#binomial-raptors."><i class="fa fa-check"></i><b>12.3</b> Binomial raptors.</a>
<ul>
<li class="chapter" data-level="12.3.1" data-path="the-journey-continues.html"><a href="the-journey-continues.html#cloudy-or-clear."><i class="fa fa-check"></i><b>12.3.1</b> Cloudy or clear.</a></li>
<li class="chapter" data-level="12.3.2" data-path="the-journey-continues.html"><a href="the-journey-continues.html#binomial-sightings."><i class="fa fa-check"></i><b>12.3.2</b> Binomial sightings.</a></li>
<li class="chapter" data-level="12.3.3" data-path="the-journey-continues.html"><a href="the-journey-continues.html#poisson-raptors."><i class="fa fa-check"></i><b>12.3.3</b> Poisson raptors.</a></li>
<li class="chapter" data-level="12.3.4" data-path="the-journey-continues.html"><a href="the-journey-continues.html#poisson-expectations."><i class="fa fa-check"></i><b>12.3.4</b> Poisson expectations.</a></li>
</ul></li>
<li class="chapter" data-level="12.4" data-path="the-journey-continues.html"><a href="the-journey-continues.html#managing-relationships"><i class="fa fa-check"></i><b>12.4</b> Managing relationships</a>
<ul>
<li class="chapter" data-level="12.4.1" data-path="the-journey-continues.html"><a href="the-journey-continues.html#drawing-the-line"><i class="fa fa-check"></i><b>12.4.1</b> Drawing the line</a></li>
<li class="chapter" data-level="12.4.2" data-path="the-journey-continues.html"><a href="the-journey-continues.html#does-it-matter"><i class="fa fa-check"></i><b>12.4.2</b> Does it matter?</a></li>
</ul></li>
</ul></li>
<li class="chapter" data-level="" data-path="references.html"><a href="references.html"><i class="fa fa-check"></i>References</a></li>
<li class="divider"></li>
<li><a href="https://github.com/rstudio/bookdown" target="blank">Published with bookdown</a></li>
</ul>
</nav>
</div>
<div class="book-body">
<div class="body-inner">
<div class="book-header" role="navigation">
<h1>
<i class="fa fa-circle-o-notch fa-spin"></i><a href="./">Probabilistic Reasoning: from an elementary point of view</a>
</h1>
</div>
<div class="page-wrapper" tabindex="-1" role="main">
<div class="page-inner">
<section class="normal" id="section-">
<div id="relationships-put-to-the-test" class="section level1" number="11">
<h1><span class="header-section-number">Chapter 11</span> Relationships Put to the Test</h1>
<script>
// Toggle the visibility of the element with the given id between
// hidden ("none") and shown ("block"). Used by the chapter's
// click-to-reveal answer sections.
function showText(y) {
  var el = document.getElementById(y);
  el.style.display = (el.style.display === "none") ? "block" : "none";
}
</script>
<div id="its-not-so-hard-to-imagine-this" class="section level2" number="11.1">
<h2><span class="header-section-number">11.1</span> It’s not so hard to imagine this…</h2>
<p>There are lines everywhere! At the store, in cafeterias, waiting for buses, trains, our friends to pick us up, the line formed by the process of doing homework, baking a cake, getting an insight. All of this takes time. Waiting times exploit our sense of satisfaction and accomplishment. We desire short waiting lines especially when we do not prefer to be in the line in the first place. The opposite happens when we listen to deeply moving music, regard a dramatically poignant painting: we want the moment to last.</p>
<p>Our team is about to assist the College’s administration of COVID-19 vaccinations on campus. Organizers have tasked us with analyzing the results of the duration of time students, faculty, and staff stand in line waiting for a vaccination. The times have been observed in morning and afternoon shifts. A way of thinking about waiting times is this model.</p>
<p><span class="math display">\[
\mu_{i} = \alpha_{i} + \beta_{i}A_i
\]</span></p>
<p>where <span class="math inline">\(\mu_i\)</span> is the average waiting time in minutes at vaccination station <span class="math inline">\(i\)</span>, <span class="math inline">\(\alpha_i\)</span> is the average morning waiting time, <span class="math inline">\(\beta_i\)</span> is the average difference in morning and afternoon waiting times, and <span class="math inline">\(A_i\)</span> is a zero/one indicator of whether we are in the afternoon shift, 1, or present ourselves to the morning shift, 0.</p>
<p>The model seems simple enough to understand, and possibly communicate to others. Waiting times, we hypothesize, depend upon a single factor, whether the test occurs in the morning, or in the afternoon. If <span class="math inline">\(A_i=0\)</span>, then we observe a waiting time average in the morning only. The average waiting time is this.</p>
<p><span class="math display">\[
\begin{align}
\mu_{i} \mid (A_i=0) &= \alpha_{i} + \beta_{i}(0) \\
\mu_{i} \mid (A_i=0) &= \alpha_{i}
\end{align}
\]</span></p>
<p>Only the intercept <span class="math inline">\(\alpha_i\)</span> matters in this conditional expression.</p>
<p>Given that we observe waiting times in the afternoon, then <span class="math inline">\(A_i = 1\)</span>.</p>
<p><span class="math display">\[
\begin{align}
\mu_{i} \mid (A_i=1) &= \alpha_{i} + \beta_{i}(1) \\
\mu_{i} \mid (A_i=1) &= \alpha_{i} + \beta_{i}
\end{align}
\]</span></p>
<p>This allows us to add the average waiting time differential for the afternoon <span class="math inline">\(\beta_i\)</span> for each station <span class="math inline">\(i\)</span> to the morning average waiting time <span class="math inline">\(\alpha_i\)</span>.</p>
<p>Here is the management problem we might face. If we observe a waiting time of 5 minutes, should we label this a morning-style of waiting? By style, we now abstract from chronological notions of morning and afternoon. We release ourselves from the idea of a clock. We now have two possible regimes: one that looks like a typical chronological morning, the other that mimics an afternoon.</p>
<p>We now focus on the problem of deciding, probabilistically speaking of course, whether a relatively high wait time mimics a morning session or an afternoon session. The implications might influence staffing, scheduling, availability of vaccines on hand, size of facility, effect of weather on waiting lines, and so on, and so forth.</p>
<p>We will focus on a single vaccination station to work out the kinks of our model, and of our thinking. All of this allows us to specify these mutually exclusive hypotheses, at least logically so.</p>
<p><span class="math display">\[
\begin{align}
H_{PM}:&\,\, \mu_{PM} = \alpha + \beta, &\quad \text{with} \quad Pr(H_{PM}) &= p \\
H_{AM}:&\,\, \mu_{AM} = \alpha, &\quad \text{with} \quad Pr(H_{AM}) &= 1-p
\end{align}
\]</span></p>
<p>While logically mutual exclusivity exists, we can imagine that distributions of <span class="math inline">\(\mu_{PM}\)</span> and <span class="math inline">\(\mu_{AM}\)</span> might overlap in probability. The hypotheses are two classifications of a waiting time. There are two of them and thus we often use the term binary classification to describe what we are to do next. We believe also that the <span class="math inline">\(PM\)</span> regime is <span class="math inline">\(p\)</span> probable, so that the <span class="math inline">\(AM\)</span> shift must be <span class="math inline">\(1-p\)</span> probable.<a href="#fn1" class="footnote-ref" id="fnref1"><sup>1</sup></a></p>
<p>Let’s insert data into this otherwise very theoretical story. Here is an Excel rendering of these hypotheses and the (Bayesian) binary classification model.</p>
<p><img src="images/13/waiting-time-am-pm-all.jpg" /></p>
<p>The cross-over of the two distributions is fairly high up the frequency axis. The probability to the left of the 1:1 threshold 3.1188 under the AM distribution (blue) curve is much larger than the probability under the red PM distribution curve. Any observations of waiting times greater than 3.1188 would most probably be AM shifts, and any less are PM timings.</p>
<p>So ends our initial foray into conditioning a variate and building hypotheses. The condition is of the simplest kind, 0 or 1. The expected values of waiting times depend on the rules of the conditioning conveyed by an intercept and slope. The model naturally yields the two hypotheses.</p>
<p>We now wonder what might happen if the conditioning was more like the wages <span class="math inline">\(W\)</span> and educational attainment <span class="math inline">\(E\)</span> model. We hypothesize that wages depend on educational level. We will use a straightline model again.</p>
<p><span class="math display">\[
\mu_W = \alpha + \beta E
\]</span>
We retrieve wages <span class="math inline">\(W\)</span> as normally distributed with mean <span class="math inline">\(\mu_W\)</span> and standard deviation <span class="math inline">\(\sigma_W\)</span>.</p>
<p><span class="math display">\[
W \sim \operatorname{N}(\alpha + \beta E ,\, \sigma_W)
\]</span>
We believe there is some sort of dependency, at least an association between <span class="math inline">\(W\)</span> and <span class="math inline">\(E\)</span>. We might measure this with correlation <span class="math inline">\(\rho\)</span>. We might also wonder what comes of <span class="math inline">\(\alpha\)</span> and <span class="math inline">\(\beta\)</span> in the face of <span class="math inline">\(\rho\)</span>. We might as well throw in <span class="math inline">\(\mu_E\)</span> and <span class="math inline">\(\sigma_E\)</span> while we are at it. That’s our next job.</p>
</div>
<div id="the-maths-the-maths" class="section level2" number="11.2">
<h2><span class="header-section-number">11.2</span> The maths! The maths!</h2>
<p>We can fuss about all we want about the maths, but they are impervious to our feelings. They remain. We can stay, or go. If we stay, and spend the time in active pursuit (just like a waiting time, waiting for insight), we might achieve a learning apogee. We suppose that we will stay awhile, for the time being. Now let us dig into our model of waiting times. Our first stop is a set of tools we will need for the excavation.</p>
<p>In what follows we use <span class="math inline">\(Y\)</span> as the wage, the metric we want to generate from its mean and standard deviation. We conjecture that <span class="math inline">\(Y\)</span> depends on <span class="math inline">\(X\)</span>, the level of educational attainment through the conditional mean of <span class="math inline">\(Y \mid X\)</span>, just like we did with vaccination waiting times.</p>
<div id="what-did-we-all-expect" class="section level3" number="11.2.1">
<h3><span class="header-section-number">11.2.1</span> What did we all expect?</h3>
<p>We define expectations as aggregations of two kinds of information. One is the information provided by an array of outcomes <span class="math inline">\(Y_i\)</span> for <span class="math inline">\(i=1 \ldots N\)</span>, where <span class="math inline">\(i\)</span> indexes <span class="math inline">\(N\)</span> outcomes. The other is the array of probabilities assigned to each outcome <span class="math inline">\(\pi_i\)</span>. The frequentist will assign <span class="math inline">\(\pi = f_i/N\)</span>, where <span class="math inline">\(f_i\)</span> is the long-run frequency of occurrence of outcome <span class="math inline">\(i\)</span>. However, we will assign <span class="math inline">\(\pi\)</span> as a normalized index of the logical plausibility of an outcome where all <span class="math inline">\(\pi\)</span>s add up to one and each is somewhere between 0 and 1. This allows us to interpret probability as an extension of logic, where probability quantifies the reasonable expectation that everyone (even a <em>robot</em> or <em>golem</em> ) who shares the same knowledge ( experience, understanding, <em>and</em> judgment) should share in accordance with the rules of conditional probability.<a href="#fn2" class="footnote-ref" id="fnref2"><sup>2</sup></a> All of this ensures we have a complete picture of all of the probability contributions, as weights, of each outcome consistent with a systematic, principled way to reason about uncertainty.</p>
<p>The aggregation is then this expression for the expectation <span class="math inline">\(E\)</span> of outcomes <span class="math inline">\(Y\)</span>.</p>
<p><span class="math display">\[
\begin{align}
\operatorname{E}Y = \sum_{i}^{N} \pi_i Y_i
\end{align}
\]</span></p>
<p>In this way we can say that <span class="math inline">\(\operatorname{E}\)</span> operates on <span class="math inline">\(Y\)</span> where <strong>to operate</strong> means <strong>to aggregate</strong> several outcomes <span class="math inline">\(Y_i\)</span> into one number (or possibly function) by multiplying probability weights times outcomes and then summing the products. That’s really two operations combined into the expectation operation. And so goes the maths!</p>
<p>Using this idea of an operator <span class="math inline">\(\operatorname{E}Y\)</span> means we define the aggregation as this expression.</p>
<p><span class="math display">\[
\begin{align}
\operatorname{E} = \sum_{i}^{N} \pi_i \times
\end{align}
\]</span>
Here are some of the algebraic rules of the road when we use this highly condensed short-hand notation.</p>
<p><span class="math display">\[
\begin{align}
Y &= \alpha\,X \\
\operatorname{E}Y &= \operatorname{E}[\alpha\,X] \\
&= \sum_{i}^{N}[\pi_i\,(\alpha\,X_i) ] \\
&= \pi_1\,\alpha\,X_1 + \ldots \pi_N\,\alpha\,X_N \\
&= \alpha \, (\pi_1\,X_1 + \ldots \pi_N\,X_N) \\
&= \alpha\,\sum_{i}^{N}[\pi_i\,(X_i) ] \\
&= \alpha\,\operatorname{E}X
\end{align}
\]</span>
This means that we can take the constant <span class="math inline">\(\alpha\)</span> outside of the expectation operator. All we did, step by step on the logical staircase, is to use the definition of the operator and then manipulate it algebraicly to deduce an equivalent expression.</p>
<p>If <span class="math inline">\(X_1=1, \ldots, X_N=1\)</span>, and the sum of probabilities <span class="math inline">\(\sum_{i}^N \, \pi_i = 1\)</span>, then we can deduce this expression.</p>
<p><span class="math display">\[
\begin{align}
Y &= \alpha\,X \\
\operatorname{E}Y &= \operatorname{E}[\alpha\,X] \\
&= \sum_{i}^{N}[\pi_i\,(\alpha\,X_i) ] \\
&= \pi_1\,\alpha\,(1) + \ldots \pi_N\,\alpha\,(1) \\
&= \alpha \, (\pi_1\,(1) + \ldots \pi_N\,(1)) \\
&= \alpha\,\sum_{i}^{N}[\pi_i (1)] \\
&= \alpha\,\operatorname{E}1 \\
&= \alpha
\end{align}
\]</span>
This may have been immediately clear to some of us before the 7 step deduction, but we might find it reassuring that the deduction verifies, and perhaps validates, our initial conjecture. We also discover another relationship.</p>
<p><span class="math display">\[
\operatorname{E}1 = 1
\]</span>
In algebra we call this the identity operator. For any number or variable, or even another expectation, <span class="math inline">\(\alpha\)</span>, then this is true.</p>
<p><span class="math display">\[
\begin{align}
\alpha \, \operatorname{E}1 &= \alpha\, 1 \\
&= \alpha
\end{align}
\]</span>
Yes, this is identity under a multiplication. Is there a zero? Yes, <span class="math inline">\(\operatorname{E}0 = 0\)</span>, the identity operator under addition. Anything added to <span class="math inline">\(\operatorname{E}0=0\)</span> just returns itself.</p>
<p>What is the expectation of a sum of variables <span class="math inline">\(X\)</span> and <span class="math inline">\(Y\)</span>?</p>
<p><span class="math display">\[
\begin{align}
Z &= X+Y \\
\operatorname{E}Z &= \operatorname{E}[X + Y] \\
&= \sum_{i}^{N}[\pi_i\,(\,X_i + Y_i) ] \\
&= \pi_1\,\,(X_1 + Y_1) + \ldots \pi_N\,(X_N+Y_N) \\
&= (\pi_1\,X_1 + \ldots \pi_N\,X_N) + (\pi_1\,Y_1 + \ldots \pi_N\,Y_N) \\
&= \sum_{i}^{N}[\pi_i\,(X_i) ] + \sum_{i}^{N}[\pi_i\,(Y_i) ] \\
&= \operatorname{E}X + \operatorname{E}Y
\end{align}
\]</span>
The expectation of a sum of outcome variables is the sum of the expectations of each variable.</p>
<p>We just examined a sum of two variables, so it behooves us to look at the product of two variables.</p>
<p><span class="math display">\[
\begin{align}
Z &= XY \\
\operatorname{E}Z &= \operatorname{E}[XY] \\
&= \sum_{i}^{N}[\pi_i\,(\,X_i\,Y_i) ] \\
&= \pi_1\,X_1 \, Y_1 + \ldots \pi_N\,X_N\,Y_N \\
&= \operatorname{E}XY
\end{align}
\]</span>
Alas, we have reduced this operation to its simplest expression already. If <span class="math inline">\(Y=X\)</span>, going through the same steps as above we find this out.</p>
<p><span class="math display">\[
\begin{align}
if\,\,Z &= XY \\
and \\
Y &= X \\
then \\
Z&= XX\\
\operatorname{E}Z &= \operatorname{E}[XX] \\
&= \sum_{i}^{N}[\pi_i\,(\,X_i\,X_i) ] \\
&= \pi_1\,X_1 \, X_1 + \ldots \pi_N\,X_N\,X_N \\
&= \pi_1\,X_1^2 + \ldots \pi_N\,X_N^2 \\
&= \operatorname{E}X^2
\end{align}
\]</span></p>
<p>It turns out that we can take an expression like this, <span class="math inline">\(Y=\alpha + \beta\,X\)</span>, multiply it by <span class="math inline">\(X\)</span> and, then operate on it with <span class="math inline">\(\operatorname{E} = \sum_{i}^{N} \pi_i \times\)</span> with the tools we now possess.</p>
<p><span class="math display">\[
\begin{align}
Y &=\alpha + \beta\,X \\
XY &= \alpha\,X + \beta\,XX \\
XY &= \alpha\,X + \beta\,X^2 \\
\operatorname{E}XY &= \operatorname{E}[\alpha\,X + \beta\,X^2] \\
&= \operatorname{E}[\alpha\,X] + \operatorname{E}[\beta\,X^2] \\
&= \alpha\,\operatorname{E}[X] + \beta\,\operatorname{E}[X^2]
\end{align}
\]</span>
This will be very useful indeed. We usually will call <span class="math inline">\(\operatorname{E}X = \mu_X\)</span> in honor of the <strong>mean</strong> of the population of all possible realizations of <span class="math inline">\(X\)</span>. We already know this as the weighted average of <span class="math inline">\(X\)</span> outcomes, where the weights are probabilities, all of which add up to 1. What about <span class="math inline">\(\operatorname{E}X^2\)</span>? To ponder this we consider the calculation of another very familiar metric, the square of the standard deviation, which has been dubbed the <strong>variance</strong>. We start with the definition and use all of the new tricks up our sleeves. We define variance as the probability weighted average of squared deviations of outcomes from the expected outcome.</p>
<p>We will need the remembrance of things in our algebraic past that look like this.</p>
<p><span class="math display">\[
\begin{align}
(a + b)^2 &= (a + b)(a + b) \\
&= a^2 + 2ab + b^2
\end{align}
\]</span></p>
<p>In what follows <span class="math inline">\(a = X\)</span> and <span class="math inline">\(b = -\operatorname{E}X\)</span>. We will also need to remember that <span class="math inline">\(-2b^2 + b^2 = -b^2\)</span>.</p>
<p><span class="math display">\[
\begin{align}
define \\
\sigma_X^2 &= Var(X) \\
then \\
Var(X) &= \operatorname{E}(X - \operatorname{E}X)^2 \\
&= \operatorname{E}(X^2 - 2X\operatorname{E}X + (\operatorname{E}X)^2) \\
&= \operatorname{E}X^2 - \operatorname{E}[2X\operatorname{E}X] + \operatorname{E}[(\operatorname{E}X)^2] \\
&= \operatorname{E}X^2 - 2(\operatorname{E}X)^2 + (\operatorname{E}X)^2 \\
&= \operatorname{E}X^2 - (\operatorname{E}X)^2 \\
&= \operatorname{E}X^2 - \mu_X^2 \\
thus \\
\sigma_{X}^2 &= \operatorname{E}X^2 - \mu_X^2 \\
rearranging \\
\operatorname{E}X^2 &= \sigma_{X}^2 + \mu_X^2
\end{align}
\]</span></p>
<p>Let’s now move on to the <em>piece de resistance</em> , <span class="math inline">\(\operatorname{E}XY\)</span>. We start with the definition of covariance, for this is where an <span class="math inline">\(XY\)</span> product resides.</p>
<p><span class="math display">\[
\begin{align}
define \\
\sigma_{XY} &= Cov(X, Y) \\
then \\
Cov(X, Y) &= \operatorname{E}(X - \operatorname{E}X)(Y - \operatorname{E}Y) \\
&= \operatorname{E}(XY - X\operatorname{E}Y - Y\operatorname{E}X + \operatorname{E}X\,\operatorname{E}Y) \\
&= \operatorname{E}(XY - \operatorname{E}X\,\operatorname{E}Y - \operatorname{E}Y\,\operatorname{E}X + \operatorname{E}X\,\operatorname{E}Y) \\
&= \operatorname{E}XY - 2\operatorname{E}X\,\operatorname{E}Y + \operatorname{E}X\,\operatorname{E}Y \\
&= \operatorname{E}XY - \operatorname{E}X\,\operatorname{E}Y \\
thus \\
\sigma_{XY} &= \operatorname{E}XY - \mu_X\mu_Y \\
rearranging \\
\operatorname{E}XY &= \sigma_{XY} + \mu_X\mu_Y
\end{align}
\]</span></p>
<p>Now we can go to work on our model with one more stop: solving a simultaneous equation. This tool too will come in handy. We suppose we have the following two equations in <span class="math inline">\(a\)</span> and <span class="math inline">\(b\)</span>. We will use the row-column convention of subscripts. Thus coefficient <span class="math inline">\(c_{12}\)</span> will be in row 1, column 2 of a matrix. First the two equations.</p>
<p><span class="math display">\[
\begin{align}
c_{11}a + c_{12}b &= d_1 \\
c_{21}a + c_{22}b &= d_2
\end{align}
\]</span></p>
<p>In matrix form this is a very tidy arrangement like this.</p>
<p><span class="math display">\[
\begin{align}
\begin{bmatrix}
c_{11} & c_{12} \\
c_{21} & c_{22}
\end{bmatrix}
\begin{bmatrix}
a \\
b
\end{bmatrix}
&=
\begin{bmatrix}
d_1 \\
d_2
\end{bmatrix} \\
\mathrm{C}\mathrm{a} &= \mathrm{d}
\end{align}
\]</span></p>
<p>Very tidy indeed! We might remember that a unique solution exists only if (or is it if and only if?) when the determinant of the matrix <span class="math inline">\(\mathrm{C}\)</span> is not zero. If it is, then the solution <span class="math inline">\(\mathrm{a}=\mathrm{C}^{-1}d\)</span> does not exist and the model is singular. In what we will do below we will compose our coefficients of means, standard deviations and correlations. Some combinations of these aggregations, constants, will prove to yield a zero determinant, and a singular model results.</p>
<p>The determinant <span class="math inline">\(\det{\mathrm{C}}\)</span> is</p>
<p><span class="math display">\[
\det{\mathrm{C}} = c_{11}c_{22}-c_{12}c_{21}
\]</span>
The solution proceeds in two sweeps, one for each of <span class="math inline">\(a\)</span> and <span class="math inline">\(b\)</span>. In the first sweep we replace the first, the <span class="math inline">\(a\)</span> column, in <span class="math inline">\(\mathrm{C}\)</span> with the column vector <span class="math inline">\(d\)</span>. We find the determinant of this new <span class="math inline">\(\mathrm{C}_a\)</span> matrix and divide by <span class="math inline">\(\det{\mathrm{C}}\)</span>. Here we go.</p>
<p><span class="math display">\[
\begin{align}
original \, \, &\mathrm{C} \\
\begin{bmatrix}
c_{11} & c_{12} \\
c_{21} & c_{22}
\end{bmatrix} \\
swap\,\, out\,\, &first\,\, column \\
\mathrm{C}_a &=
\begin{bmatrix}
d_{1} & c_{12} \\
d_{2} & c_{22}
\end{bmatrix} \\
then \\
a &= \frac{\det{\mathrm{C_a}}}{\det{\mathrm{C}}} \\
&= \frac{d_1c_{22}-d_2c_{12}}{c_{11}c_{22}-c_{12}c_{21}}
\end{align}
\]</span></p>
<p>Now the second sweep in all its glory.</p>
<p><span class="math display">\[
\begin{align}
original \, \, &\mathrm{C} \\
\begin{bmatrix}
c_{11} & c_{12} \\
c_{21} & c_{22}
\end{bmatrix} \\
swap\,\, out\,\, &second\,\, column \\
\mathrm{C}_b &=
\begin{bmatrix}
c_{11} & d_{1} \\
c_{21} & d_2
\end{bmatrix} \\
then \\
b &= \frac{\det{\mathrm{C_b}}}{\det{\mathrm{C}}} \\
&= \frac{c_{11}d_2-c_{21}d_1}{c_{11}c_{22}-c_{12}c_{21}}
\end{align}
\]</span></p>
<p>Very much a formula for the ages.</p>
</div>
<div id="walking-the-straight-line" class="section level3" number="11.2.2">
<h3><span class="header-section-number">11.2.2</span> Walking the straight line</h3>
<p>Here is our model where both <span class="math inline">\(Y\)</span> and <span class="math inline">\(X\)</span> have some distribution with <span class="math inline">\(\pi\)</span> probabilities for each. Here we use <span class="math inline">\(\pi\)</span> as the Greek letter for <span class="math inline">\(p\)</span>, not as the <span class="math inline">\(\pi\)</span> of circle fame. Both <span class="math inline">\(Y\)</span> and <span class="math inline">\(X\)</span> are what we will very loosely call <strong>random variables</strong>, because they have outcomes with associated probabilities of occurrence.</p>
<p><span class="math display">\[
Y = \alpha + \beta\, X
\]</span>
We now ask the question, what is <span class="math inline">\(\operatorname{E}(Y \mid X=x)=\mu_{Y \mid X}\)</span>? What, on weighted average, can we expect <span class="math inline">\(Y\)</span> to be? First of all, this must be true.</p>
<p><span class="math display">\[
\begin{align}
if \\
\operatorname{E}(Y \mid X=x) &= \mu_{Y \mid X} \\
then \\
\mu_{Y \mid X} &= \operatorname{E}(\alpha + \beta\, X) \\
&= \operatorname{E}\alpha (1) + \operatorname{E}(\beta\,X) \\
&= \alpha\,\operatorname{E}1 + \beta\,\operatorname{E}X \\
&= \alpha\,(1) + \beta\,\mu_X \\
&= \alpha + \beta\,\mu_X
\end{align}
\]</span></p>
<p>Result one is in hand, <span class="math inline">\(\mu_{Y \mid X}= \alpha + \beta\,\mu_X\)</span> is a true statement according to our many deductions. By the way the statement <span class="math inline">\(\mu_{Y \mid X} = \alpha\,\operatorname{E}1 + \beta\,\operatorname{E}X\)</span> is an example of the distributive property of multiplication over addition.</p>
<p>Now for our second trick we multiply <span class="math inline">\(Y\)</span> by <span class="math inline">\(X\)</span> to get a second result and a second true statement. We will condense <span class="math inline">\(Y \mid X = Y\)</span> to save what’s left of our eyesight. We remember all of our hard work above, especially this inventory of results.</p>
<p><span class="math display">\[
\begin{align}
\operatorname{E}Y &= \mu_{Y} \\
\operatorname{E}X &= \mu_{X} \\
\operatorname{E}X^2 &= \sigma_{X}^2 + \mu_X^2 \\
\operatorname{E}XY &= \sigma_{XY} + \mu_X\mu_Y
\end{align}
\]</span></p>
<p>Using this inventory more than a few times we get these results.</p>
<p><span class="math display">\[
\begin{align}
Y &= \alpha + \beta\, X \\
then \\
XY &= \alpha\,X + \beta\, XX \\
&= \alpha\,X + \beta\, X^2 \\
so\,\,that \\
\operatorname{E}XY &= \operatorname{E}(\alpha\,X + \beta\, X^2) \\
&= \operatorname{E}\alpha\,X + \operatorname{E}\beta\,X^2 \\
&= \alpha\,\operatorname{E}X + \beta\,\operatorname{E}X^2 \\
&= \alpha\,\mu_X + \beta\,(\sigma_X^2 + \mu_X^2) \\
but\,\,we\,\,know\,\,that \\
\operatorname{E}XY &= \sigma_{XY} + \mu_X\mu_Y \\
thus,\,\, again \\
\sigma_{XY} + \mu_X\mu_Y &= \alpha\,\mu_X + \beta\,(\sigma_X^2 + \mu_X^2)
\end{align}
\]</span></p>
<p>We now have two equations in two, as yet to be determined, unknowns. They are unobserved data, <span class="math inline">\(\alpha\)</span> and <span class="math inline">\(\beta\)</span>. Both equations are true, and true jointly. This means we can stack one on top of the other as a simultaneous equation system and, we hope this time, solve them for unique values of <span class="math inline">\(\alpha\)</span> and <span class="math inline">\(\beta\)</span>. Yes, we demand a formula!</p>
<p>Here are the two equations with <span class="math inline">\(\alpha\)</span> and <span class="math inline">\(\beta\)</span> terms on the left-hand side and constant terms, the expectations are all constant aggregations, on the right-hand side of the equation. We also commute the terms so that our unknowns are pre-multiplied by coefficients.</p>
<p><span class="math display">\[
\begin{align}
\alpha + \mu_X\,\beta &= \mu_Y \\
\mu_X\,\alpha + (\sigma_X^2 + \mu_X^2)\,\beta &= \sigma_{XY} + \mu_X\mu_Y
\end{align}
\]</span></p>
<p>The matrix representation will help us easily match coefficients with our simultaneous equation model, way above as we replicate below.</p>
<p><span class="math display">\[
\begin{align}
\begin{bmatrix}
c_{11} & c_{12} \\
c_{21} & c_{22}
\end{bmatrix}
\begin{bmatrix}
a \\
b
\end{bmatrix}
&=
\begin{bmatrix}
d_1 \\
d_2
\end{bmatrix} \\
\mathrm{C}\mathrm{a} &= \mathrm{d}
\end{align}
\]</span></p>
<p>Our simultaneous equations of expected values for the linear model <span class="math inline">\(Y=\alpha+\beta X\)</span> yields this structure.</p>
<p><span class="math display">\[
\begin{align}
\alpha + \mu_X\,\beta &= \mu_Y \\
\mu_X\,\alpha + (\sigma_X^2 + \mu_X^2)\,\beta &= \sigma_{XY} + \mu_X\mu_Y \\
becomes \\
\begin{bmatrix}
1 & \mu_X \\
\mu_X & \sigma_X^2 + \mu_X^2
\end{bmatrix}
\begin{bmatrix}
\alpha \\
\beta
\end{bmatrix}
&=
\begin{bmatrix}
\mu_Y \\
\sigma_{XY} + \mu_X\mu_Y
\end{bmatrix} \\
\mathrm{C}\mathrm{a} &= \mathrm{d}
\end{align}
\]</span>
We can solve for the unobserved, unknown, and otherwise conjectured (we might smell a hypothesis brewing here) <span class="math inline">\(\alpha\)</span> and <span class="math inline">\(\beta\)</span> using our trusty determinant solutions.</p>
<p><span class="math display">\[
\begin{align}
\alpha &= \frac{\mu_Y(\sigma_X^2 + \mu_X^2) - \mu_X(\sigma_{XY} + \mu_X\mu_Y)}{\sigma_X^2 + \mu_X^2 - \mu_X^2} \\
&= \frac{\mu_Y\sigma_X^2 - \mu_X\sigma_{XY}}{\sigma_X^2} \\
&= \mu_Y - \mu_X\frac{\sigma_{XY}}{\sigma_X^2}\\
and\,\, then \\
\beta &= \frac{\det{\mathrm{C_{\beta}}}}{\det{\mathrm{C}}} \\
&= \frac{c_{11}d_2-c_{21}d_1}{c_{11}c_{22}-c_{12}c_{21}} \\
&= \frac{\sigma_{XY} + \mu_X\mu_Y - \mu_X\mu_Y}{\sigma_X^2 + \mu_X^2 - \mu_X^2} \\
&= \frac{\sigma_{XY}}{\sigma_X^2}
\end{align}
\]</span></p>
<p>Yeow! All that work to get at this simplification all due to the wonderful result that <span class="math inline">\(\alpha\)</span> has <span class="math inline">\(\beta = \sigma_{XY}/\sigma_X^2\)</span> in it.</p>
<p><span class="math display">\[
\begin{align}
\operatorname{E}(Y \mid X) &= \alpha + \beta\,X \\
\operatorname{E}(Y \mid X) &= \left(\mu_Y - \mu_X\frac{\sigma_{XY}}{\sigma_X^2}\right) + \frac{\sigma_{XY}}{\sigma_X^2}\,X \\
rearranging\,\,terms\\
\operatorname{E}(Y \mid X) &= \mu_Y + \frac{\sigma_{XY}}{\sigma_X^2}(X - \mu_X)
\end{align}
\]</span></p>
<p>The second formulation is also the basis for the vaunted Capital Asset Pricing Model in finance, where <span class="math inline">\(Y\)</span> is the return on a security (stock, bond, etc.) and <span class="math inline">\(X\)</span> is the return on a market index (e.g., S&P 500).</p>
<p>We have, yes, one more stop, before we drop. The definition of correlation is here.</p>
<p><span class="math display">\[
\rho = \frac{\sigma_{XY}}{\sigma_X\,\sigma_Y}
\]</span>
We can use this to rearrange the deck chairs on this Titanic of a beast of gnarly maths (all algebra! and barely a faint odor of calculus?).</p>
<p><span class="math display">\[
\begin{align}
if \\
\rho &= \frac{\sigma_{XY}}{\sigma_X\,\sigma_Y} \\
then \\
\sigma_{XY} &= \rho\,\sigma_X\,\sigma_Y\\
thus \\
\beta &= \frac{\sigma_{XY}}{\sigma_X^2} \\
&= \frac{\rho\,\sigma_X\,\sigma_Y}{\sigma_X^2} \\
&= \frac{\rho\,\sigma_Y}{\sigma_X}
\end{align}
\]</span>
We need numbers, fast. But we should hold on. One more calculation to make. We have the mean, but what about the conditional standard deviation?</p>
</div>
<div id="a-short-variance-diatribe" class="section level3" number="11.2.3">
<h3><span class="header-section-number">11.2.3</span> A short variance diatribe</h3>
<p>Here we take the standard deviation as given, perhaps at our peril. The variance of waiting times is</p>
<p><span class="math display">\[
Var(Y \mid X) = (1-\rho^2)\sigma_Y^2
\]</span></p>
<p>How do we get this? A lot easier than the preceding. There are no simultaneous equations to worry about. Here’s the algebra.</p>
<p><span class="math display">\[
\begin{align}
Var(Y \mid X) &= \operatorname{E}[Y - \operatorname{E}(Y \mid X)]^2 \\
&= \operatorname{E}[Y - (\mu_Y + \frac{\sigma_{XY}}{\sigma_X^2}(X - \mu_X))]^2 \\
&= \operatorname{E}[(Y- \mu_Y) - \frac{\sigma_{XY}}{\sigma_X^2}(X - \mu_X)]^2 \\
&= \operatorname{E}[(Y- \mu_Y)^2 + \frac{\rho_{XY}^2\sigma_Y^2}{\sigma_X^2}(X - \mu_X)^2 - 2\frac{\rho_{XY}\sigma_Y}{\sigma_X}(Y- \mu_Y)(X - \mu_X)] \\
&= \operatorname{E}[(Y- \mu_Y)^2] + \frac{\rho_{XY}^2\sigma_Y^2}{\sigma_X^2}\operatorname{E}[(X - \mu_X)^2] - 2\frac{\rho_{XY}\sigma_Y}{\sigma_X}\operatorname{E}[(Y- \mu_Y)(X - \mu_X)] \\
&= \sigma_Y^2 + \frac{\rho_{XY}^2\sigma_Y^2}{\sigma_X^2} \sigma_X^2 - 2\frac{\rho_{XY}\sigma_Y}{\sigma_X}(\rho_{XY}\sigma_X\,\sigma_Y) \\
&= \sigma_Y^2 - \rho_{XY}^2\sigma_Y^2 \\
&= (1 - \rho_{XY}^2)\sigma_Y^2
\end{align}
\]</span>
Done! If the joint distribution of <span class="math inline">\(X\)</span> and <span class="math inline">\(Y\)</span> is Gaussian, then we can generate <span class="math inline">\(Y \mid X \sim \operatorname{N}( \alpha + \beta X, (1 - \rho_{XY}^2)\sigma_Y^2)\)</span>. Now we can infer <span class="math inline">\(Y\)</span> behavior.</p>
</div>
</div>
<div id="does-education-matter" class="section level2" number="11.3">
<h2><span class="header-section-number">11.3</span> Does education matter?</h2>
<p>Finally, some numbers? Suppose this is the data and an Excel trendline through the scatter plot for wages and education from way long ago.</p>
<p><img src="images/13/wages-educ-data-plot.jpg" alt="Scatter plot of hourly wages against years of education with an Excel trendline" /></p>
<p>Excel calculates an intercept <span class="math inline">\(\alpha = -35.47\)</span>, and slope <span class="math inline">\(\beta=3.834\)</span>.</p>
<p>Here we calculate intercepts and slopes based on the expectation of wages conditional on education level.</p>
<p><img src="images/13/wage=educ-calculations.jpg" alt="Worked calculations of the intercept and slope for wages conditional on education" /></p>
<p>The calculations align exactly with Excel’s view of the universe.</p>
</div>
<div id="back-to-the-business-at-hand" class="section level2" number="11.4">
<h2><span class="header-section-number">11.4</span> Back to the business at hand</h2>
<p>We used all of that math to understand the ins and outs of conditional expectations. The condition influences not only the expectation; it also influences the conditional standard deviation. Here we take the standard deviation as given, perhaps at our peril. The variance of waiting times is</p>
<p><span class="math display">\[
Var(Y \mid X) = (1-\rho^2)\sigma_Y^2
\]</span>
Here’s the question before us: if we spend more years being educated, do we have a higher wage? We can mold this into two hypotheses to put a point on it. How do wages compare between 16 years of schooling (4 years of post-secondary education) and 12 years (secondary education)?</p>
<p><span class="math display">\[
\begin{align}
H_{college}:&\,\, \mu_{college} &= \alpha + \beta (16), \, &&&with \, Pr(H_{college}) &= p \\
H_{high school}:&\,\, \mu_{highschool} &= \alpha + \beta (12), \, &&&with \, Pr(H_{highschool}) &= 1-p
\end{align}
\]</span></p>
<p>Let’s compute some parameters.</p>
<p><img src="images/13/wage=educ-calculations.jpg" alt="Calculated parameters for the wage and education model" /></p>
<p>It is now a matter of depositing these values into our hypothesis testing model, the one we used with waiting times, modified for wages and education.</p>
<p><img src="images/13/wage-educ-hypo-test-1.jpg" alt="Hypothesis test comparing expected wages at 16 versus 12 years of education" /></p>
<p>We have a perfectly symmetrical solution for the equal probability, even odds, experiment. Consistent with this sample only, 12 years of education seems to work for a wage less than $18.20/hour. Any wage greater than that is consistent with 16 years of education, in this sample, and probably so.</p>
<p>Oh, and yes education matters, at least financially, and in this sample, probably so.</p>
</div>
<div id="does-it-really-matter" class="section level2" number="11.5">
<h2><span class="header-section-number">11.5</span> Does it really matter?</h2>
<p>Here is another hypothesis test. Suppose we are still wary of all of the math and its interpretation and even the data and anecdotal, personal, experience. The skeptic says educational attainment does not matter. The critical thinker says, let’s use data to help us understand whether the skeptic’s claims are true, probably.</p>
<p>Here are binary hypotheses for us to consider. We whittle the skeptic down to a level of educational attainment the skeptic can live with, at least for this test. The level is <span class="math inline">\(E=12\)</span> years for a yes answer. The skeptic also agrees to the same sample we used before and no means <span class="math inline">\(\beta=0\)</span>. For the skeptic, and the model, this means that <span class="math inline">\(E\)</span> has no impact, no relationship with wages.</p>
<p><span class="math display">\[
\begin{align}
H_{no}:\,\, \mu_{no} &= \alpha, \, &&with \, Pr(H_{no}) = p \\
H_{yes}:\,\, \mu_{yes} &= \alpha + \beta (12), \, &&with \, Pr(H_{yes}) = 1-p
\end{align}
\]</span></p>
<p>The <span class="math inline">\(\mu_{no}\)</span> and <span class="math inline">\(\mu_{yes}\)</span> are the results of two different ways, two different conjectures, two different models of behavior. We build no and yes into our computational model in the next round. The problem is that in this sample, the intercept <span class="math inline">\(\alpha < 0\)</span>.</p>
</div>
<div id="references-and-endnotes-4" class="section level2" number="11.6">
<h2><span class="header-section-number">11.6</span> References and endnotes</h2>
</div>
</div>
<div class="footnotes">
<hr />
<ol start="1">
<li id="fn1"><p>We could do very well to check out this <a href="https://pubsonline.informs.org/doi/10.1287/ited.1100.0060">Excel implementation of an entropic, almost Bayesian, approach to classification</a>. What is entropy? From thermodynamic irregularities to the <a href="https://clinicalview.gehealthcare.com/quick-guide/entropy-monitoring-valuable-tool-guiding-delivery-anesthesia">delivery of anaesthesia</a>, entropy can measure chaos. It has a lot to do with the <span class="math inline">\(log((1-p)/p)\)</span> portion of the 1:1 odds threshold we discussed.<a href="relationships-put-to-the-test.html#fnref1" class="footnote-back">↩︎</a></p></li>
<li id="fn2"><p><a href="https://en.wikipedia.org/wiki/Cox%27s_theorem">Cox’s theorem</a> provides a logical underpinning to this statement: the rules of probability theory need not be derived from a definition of probabilities as relative frequencies (frequentist approach). He goes further to show that the properties of probability as logic also follow from certain properties one might desire of any system of plausible reasoning about uncertainty. <a href="https://reader.elsevier.com/reader/sd/pii/S0888613X03000513?token=7D6FBDB41F8831DD4DE22162DFA2FAA3A8786A9B0CAB6231C759CF725E203785164FE0BAE53EFCB46143DF016439BCD6&amp;originRegion=us-east-1&amp;originCreation=20211123164351">Van Horn</a> is a tutorial on Cox’s approach. Plausible reasoning may be illustrated with this example of the distinction between gradual degrees of possible outcomes, that is, uncertainty, and what is or is not, that is, truth. As an example, one’s confidence in the statement <em>Daniel is well over six feet tall</em>, after seeing Daniel, legs splayed out, sitting at a desk, is a degree of plausibility. In contrast, the statement <em>Daniel is tall</em> may be somewhat true (if Daniel measures five feet eleven inches and the definition of tall is greater than or equal to six feet) or entirely true (if Daniel measures seven feet one inch).<a href="relationships-put-to-the-test.html#fnref2" class="footnote-back">↩︎</a></p></li>
</ol>
</div>
</section>
</div>
</div>
</div>
<a href="part-four-the-test-of-a-relationship.html" class="navigation navigation-prev " aria-label="Previous page"><i class="fa fa-angle-left"></i></a>
<a href="the-journey-continues.html" class="navigation navigation-next " aria-label="Next page"><i class="fa fa-angle-right"></i></a>
</div>
</div>
<script src="libs/gitbook-2.6.7/js/app.min.js"></script>
<script src="libs/gitbook-2.6.7/js/clipboard.min.js"></script>
<script src="libs/gitbook-2.6.7/js/plugin-search.js"></script>
<script src="libs/gitbook-2.6.7/js/plugin-sharing.js"></script>
<script src="libs/gitbook-2.6.7/js/plugin-fontsettings.js"></script>
<script src="libs/gitbook-2.6.7/js/plugin-bookdown.js"></script>
<script src="libs/gitbook-2.6.7/js/jquery.highlight.js"></script>
<script src="libs/gitbook-2.6.7/js/plugin-clipboard.js"></script>
<script>
// Auto-generated by bookdown's gitbook output format: boots the gitbook
// runtime once its AMD module is available, passing the book's UI options.
// NOTE(review): generated code — edit the bookdown YAML config, not this file.
gitbook.require(["gitbook"], function(gitbook) {
gitbook.start({
// Toolbar social-sharing buttons; "all" lists providers shown in the
// expandable share menu, the named booleans toggle the quick buttons.
"sharing": {
"github": false,
"facebook": true,
"twitter": true,
"linkedin": false,
"weibo": false,
"instapaper": false,
"vk": false,
"whatsapp": false,
"all": ["facebook", "twitter", "linkedin", "weibo", "instapaper"]
},
// Default reader appearance (theme/font family/relative size index).
"fontsettings": {
"theme": "white",
"family": "sans",
"size": 2
},
// Edit/history/view toolbar links are disabled (null link and text).
"edit": {
"link": null,
"text": null
},
"history": {
"link": null,
"text": null
},
"view": {
"link": null,
"text": null
},
// Downloadable renditions of the book offered in the toolbar.
"download": ["book-probability.pdf", "book-probability.epub"],
// Client-side full-text search backend.
"search": {
"engine": "fuse",
"options": null
},
// Sidebar table of contents collapses at the subsection level.
"toc": {
"collapse": "subsection"
}
});
});
</script>
<!-- dynamically load mathjax for compatibility with self-contained -->
<script>
// Generated pandoc/bookdown boilerplate: dynamically injects the MathJax
// script tag so self-contained output can swap in a custom URL.
(function () {
var script = document.createElement("script");
script.type = "text/javascript";
// "true" is a placeholder written by the generator meaning "use the
// default CDN"; the check below substitutes the real URL.
var src = "true";
if (src === "" || src === "true") src = "https://mathjax.rstudio.com/latest/MathJax.js?config=TeX-MML-AM_CHTML";
// When served over http(s), strip the scheme so MathJax loads
// protocol-relatively; skipped for file: so local viewing still works.
if (location.protocol !== "file:")
if (/^https?:/.test(src))
src = src.replace(/^https?:/, '');
script.src = src;
document.getElementsByTagName("head")[0].appendChild(script);
})();
</script>
</body>
</html>