\documentclass[10.5pt]{article}

%% This is a convenience variable if you are using PGFPlots to build plots
%% within LaTeX. If you want to import PDF files for figures directly, you
%% can use the standard `\includegraphics' command. See the definition of
%% `\includetikz' in `tex/preamble-pgfplots.tex' for where the files are
%% assumed to be if you use `\includetikz' when `\makepdf' is not defined.
\newcommand{\makepdf}{}

%% When defined (value is irrelevant), `\highlightchanges' will cause text
%% in `\tonote' and `\new' to become colored. This is useful when you need
%% to distribute drafts that are undergoing revision and you want to
%% highlight to your colleagues which parts are new and which parts are
%% only for discussion.
\newcommand{\highlightchanges}{}

%% Import the necessary preambles.
\input{tex/src/preamble-style.tex}
\input{tex/build/macros/project.tex}
\input{tex/src/preamble-pgfplots.tex}
\input{tex/src/preamble-biblatex.tex}





\title{Maneage: Customizable Template for Managing Data Lineage}
\author{\large\mpregular \authoraffil{Mohammad Akhlaghi}{1,2},
        \large\mpregular \authoraffil{Ra\'ul Infante-Sainz}{1,2}\\
  {
    \footnotesize\mplight
    \textsuperscript{1} Instituto de Astrof\'isica de Canarias, C/V\'ia L\'actea, 38200 La Laguna, Tenerife, ES.\\
    \textsuperscript{2} Facultad de F\'isica, Universidad de La Laguna, Avda. Astrof\'isico Fco. S\'anchez s/n, 38200, La Laguna, Tenerife, ES.\\
    Corresponding author: Mohammad Akhlaghi
    (\href{mailto:mohammad@akhlaghi.org}{\textcolor{black}{mohammad@akhlaghi.org}})
  }}
\date{}





\begin{document}%\layout
\thispagestyle{firstpage}
\maketitle

%% Abstract
{\noindent\mpregular
  The era of big data has also ushered in an era of big responsibility.
  Without it, the integrity of the results will be a subject of perpetual debate.
  In this paper, Maneage (management + lineage) is introduced as a low-level solution.
  Maneage is a publishing and archival friendly data lineage management system (in machine-actionable plain-text) for projects in the sciences or industry.
  Its core principles include: being stand-alone (e.g., requiring nothing beyond a POSIX-compatible system, no administrator privileges, and no network connection), modularity, straightforward design, traceable inputs and outputs, temporal lineage/provenance, and free software (for scientific applications).
  A project that uses Maneage will be able to publish the complete data lineage, making it exactly reproducible (as a test on sufficiently conveying the data lineage).
  The offered lineage/control isn't limited to downloading the raw input data and processing them automatically; it also includes building the necessary data analysis software with fixed versions and build configurations.
  Maneage additionally includes the final PDF report of the project, establishing direct links between the data analysis and the narrative (with the precision of a sentence).
  Maneage enables incremental projects, where a new project can branch off an existing one, making only moderate changes and experimenting on the methods.
  Once a sufficiently large number of projects use it, it can also enable more ambitious efforts, for example automatic workflow creation through machine learning tools, or automated data management plans.
  As a demonstration, this paper is written using Maneage (snapshot \projectversion).
  \horizontalline

  \noindent
  {\mpbold Keywords:} Data Lineage, Data Provenance, Reproducibility, Scientific Pipelines, Workflows
}

\horizontalline










\section{Introduction}
\label{sec:introduction}

The increasing volume and complexity of data analysis has been highly productive, giving rise to a new branch of ``Big Data'' in many fields of the sciences and industry.
However, given its inherent complexity, the mere results are barely useful alone.
Questions such as these commonly follow any such result:
What inputs were used?
What operations were done on those inputs? How were the configurations or training data chosen?
How did the quantitative results get visualized into the final demonstration plots, figures or narrative/qualitative interpretation?
May there be a bias in the visualization?
See Figure \ref{fig:questions} for a more detailed visual representation of such questions for various stages of the workflow.

In data science and database management, this type of metadata are commonly known as \emph{data provenance} or \emph{data lineage}.
Their definitions are elaborated with other basic concepts in Section \ref{sec:definitions}.
Data lineage is being increasingly demanded for integrity checking from both the scientific and industrial/legal domains.
Notable examples in each domain are respectively the ``Reproducibility crisis'' in the sciences that was claimed by the Nature journal \citep{baker16}, and the General Data Protection Regulation (GDPR) by the European Parliament and the California Consumer Privacy Act (CCPA), implemented in 2018 and 2020 respectively.
The former argues that reproducibility (as a test on sufficiently conveying the data lineage) is necessary for other scientists to study, check and build-upon each other's work.
The latter requires the data intensive industry to give individual users control over their data, effectively requiring thorough management and knowledge of the data's lineage.
Besides regulation and integrity checks, having a robust data governance (management of data lineage) in a project can be very productive: it enables easy debugging, experimentation on alternative methods, or optimization of the workflow.

In the sciences, the results of a project's analysis are published as scientific papers which have also been the primary conveyor of the result's lineage: usually in narrative form, within the ``Methods'' section of the paper.
From our own experience, this section is usually the most discussed part during peer review and conference presentations, showing its importance.
After all, a result is defined as ``scientific'' based on its \emph{method} (the ``scientific method''), or lineage in data-science terminology.
In the industry however, data governance is usually kept as a trade secret and isn't publicly published or scrutinized.
Therefore while the proposed approach introduced in this paper (Maneage) is also useful in industrial contexts, the main practical focus would be in the scientific front which has traditionally been more open to publishing the methods and anonymous peer scrutiny.

\begin{figure}[t]
  \begin{center}
    \includetikz{figure-project-outline}
  \end{center}
  \vspace{-17mm}
  \caption{\label{fig:questions}Graph of a generic project's workflow (connected through arrows), highlighting the various issues/questions on each step.
    The green boxes with sharp edges are inputs and the blue boxes with rounded corners are the intermediate or final outputs.
    The red boxes with dashed edges highlight the main questions on the respective stage.
    The orange box surrounding the software download and build phases shows the various commonly recognized solutions to the questions within it; for more, see Appendix \ref{appendix:jobmanagement}.
  }
\end{figure}

The traditional format of a scientific paper has been very successful in conveying the method along with the result over the last centuries.
However, the complexity mentioned above has made it impossible to describe all the analytical steps of a project to a sufficient level of detail.
Citing this difficulty, many authors limit themselves to describing the very high-level generalities of their analysis, even though the most basic calculations (like the mean of a distribution) can depend on the software implementation.

Due to the complexity of modern scientific analysis, a small (but significant) deviation in the final result can originate from any of many different steps.
Publishing the precise code of the analysis is the only guarantee that such deviations can be traced.
For example, \citet{smart18} describes how a 7-year old conflict in theoretical condensed matter physics was only identified after the relative codes were shared.
Nature is already a black box which we are trying hard to unlock, or understand.
Not being able to experiment on the methods of other researchers is an artificial and self-imposed black box, wrapped over the original, and taking most of the energy of researchers.

\citet{miller06} found that a mistaken column flipping led to the retraction of 5 papers in major journals, including Science.
\citet{baggerly09} highlighted the inadequate narrative description of the analysis and showed the prevalence of simple errors in published results, ultimately calling their work ``forensic bioinformatics''.
\citet{herndon14} and \citet[a self-correction]{horvath15} also reported similar situations and \citet{ziemann16} concluded that one-fifth of papers with supplementary Microsoft Excel gene lists contain erroneous gene name conversions.
Such integrity checks are a critical component of the scientific method, but are only possible with access to the data and code.

The completeness of a paper's published metadata (or ``Methods'' section) can be measured by a simple question: given the same input datasets (for example, hosted on a third-party database like \href{http://zenodo.org}{zenodo.org}), can another researcher reproduce the exact same result automatically, without needing to contact the authors?
Several studies have attempted to answer this with different levels of detail.
For example \citet{allen18} found that roughly half of the papers in astrophysics don't even mention the names of any analysis software they have used, while \citet{menke20} found that the fraction of papers explicitly mentioning their tools/software has greatly improved in medical journals over the last two decades.

\citet{ioannidis2009} attempted to reproduce 18 published results by two independent groups, but only fully succeeded in 2 of them and partially in 6.
\citet{chang15} attempted to reproduce 67 papers in well-regarded economic journals with data and code: only 22 could be reproduced without contacting authors, and more than half could not be replicated at all.
\citet{stodden18} attempted to replicate the results of 204 scientific papers published in the journal Science \emph{after} that journal adopted a policy of publishing the data and code associated with the papers.
Even though the authors were contacted, the success rate was $26\%$.
Generally, this problem is unambiguously felt in the community: \citet{baker16} surveyed 1574 researchers and found that only $3\%$ did not see a ``reproducibility crisis''.

This is not a new problem in the sciences: in 2011, Elsevier conducted an ``Executable Paper Grand Challenge'' \citep{gabriel11}.
The proposed solutions were published in a special edition.
Some of them are reviewed in Appendix \ref{appendix:existingsolutions}, but most have not been continued since then.
Before that, \citet{ioannidis05} argued that ``most claimed research findings are false''.
In the 1990s, \citet{schwab2000, buckheit1995, claerbout1992} described this same problem very eloquently and also provided some solutions that they used.
While the situation has improved since the early 1990s, these papers still resonate strongly with the frustrations of today's scientists.
Even earlier, through his famous quartet, \citet{anscombe73} qualitatively showed how distancing of researchers from the intricacies of algorithms/methods can lead to misinterpretation of the results.
One of the earliest such efforts we found was \citet{roberts69} who discussed conventions in FORTRAN programming and documentation to help in publishing research codes.

From a practical point of view, for those who publish the data lineage, a major problem is the fast-evolving and diverse software technologies and methodologies that are used by different teams in different epochs.
\citet{zhao12} describe it as ``workflow decay'' and recommend preserving these auxiliary resources.
But in the case of software it's not as straightforward as for data: if preserved in binary form, software can only be run on certain hardware, and if kept as source code, its build dependencies and build configuration must also be preserved.
\citet{gronenschild12} specifically study the effect of software version and environment and encourage researchers to not update their software environment.
However, this is not a practical solution because software updates are necessary, at least to fix bugs in the same research software.
Generally, software is not a passive component of a project in which one package can easily be swapped for another.
Projects are built around specific software technologies, and research in software methods and implementations is itself a vibrant research topic in many domains \citep{dicosmo19}.

\tonote{add a short summary of the advantages of Maneage.}

This paper introduces Maneage as a solution to these important issues.
Section \ref{sec:definitions} defines the necessary concepts and terminology used in this paper leading to a discussion of the necessary guiding principles in Section \ref{sec:principles}.
Section \ref{sec:maneage} introduces the implementation of Maneage, going into lower-level details in some cases.
Finally, in Section \ref{sec:discussion}, the future prospects of using systems like this template are discussed.
After the main body, Appendix \ref{appendix:existingtools} reviews the lower-level technologies that are most commonly used today.
In light of the guiding principles, in Appendix \ref{appendix:existingsolutions} a critical review of many workflow management systems that have been introduced over the last three decades is given.
Finally, in Appendix \ref{appendix:softwareacknowledge} we acknowledge the various software (with a name and version number) that were used for this project.




















\section{Definition of important terms}
\label{sec:definitions}

The concepts and terminologies of reproducibility and project/workflow management and design are commonly used differently by different research communities or different solution providers.
As a consequence, before starting with the technical details it is important to clarify the specific terms used throughout this paper and its appendix.





\subsection{Definition: input}
\label{definition:input}
Any computer file that may be usable in more than one project.
The inputs of a project include data, software source code, etc. (see \citet{hinsen16} on the fundamental similarity of data and source code).
Inputs may be encoded in plain text (for example tables of comma-separated values, CSV, or processing scripts), custom binary formats (for example JPEG images), or domain-specific data formats \citep[e.g., FITS in astronomy, see][]{pence10}.

Inputs may have initially been created/written (e.g., software source code) or collected (e.g., data) for one specific project.
However, they can, and most often will, be used in other/later projects also.
Following the principle of modularity, it is therefore optimal to treat the inputs of any project as independent entities, not mixing them with how they are managed (how software is run on the data) within the project (see Section \ref{definition:project}).

Inputs are nevertheless necessary for building and running any project.
Some inputs may have already been archived/published independently prior to the project's publication.
In this case, they can easily be downloaded and used by independent projects.
Otherwise, they can be published with the project, but as independent files, for example see \href{https://doi.org/10.5281/zenodo.3408481}{zenodo.3408481} \citep{akhlaghi19}.





\subsection{Definition: output}
\label{definition:output}
Any computer file that is published at the end of the project.
The output(s) can be datasets (terabyte-sized, small table(s) or image(s), a single number, a true/false (Boolean) outcome), automatically generated software source code, or any other file.
The raw output files are commonly supplemented with a paper/report that summarizes them in a human-friendly readable/printable/narrative format.
The report commonly includes highlights of the input/output datasets (or intermediate datasets) as plots, figures, tables or simple numbers blended into the text.

The outputs can be published independently on data servers that assign specific persistent identifiers (PIDs) to be cited in the final report or published paper (in a journal, for example).
Alternatively, the datasets can be published with the project source, for example \href{https://doi.org/10.5281/zenodo.1164774}{zenodo.1164774} \citep[Sections 7.3 \& 3.4]{bacon17}.





\subsection{Definition: project}
\label{definition:project}
The most high-level series of operations that are done on input(s) to produce the output(s).
Because the project's report is also defined as an output (see above), besides the high-level analysis, the project's source also includes scripts/commands to produce plots, figures or tables.

With this definition, this concept of a ``project'' is similar to ``workflow''.
However, it is important to emphasize that the project's source code and inputs are distinct entities.
For example, the project source may happen to be written in the same programming language as one of its analysis steps, but the two remain distinct.
Generally, the project source is defined as the most high-level source file that is unique to that individual project (its language is irrelevant).
The project is thus only in charge of managing the inputs and outputs of each analysis step (taking the outputs of one step, and feeding them as inputs to the next), not of doing the analysis itself.
A good project will follow the modularity principle: analysis scripts should be well-defined as an independently managed software source.
For example modules in Python, packages in R, or libraries/programs in C/C++ that can be imported in higher-level project sources.




\subsection{Definition: data provenance}
\label{definition:provenance}

Data provenance is a very generic term which points to slightly different technical concepts in different fields like databases, storage systems and scientific workflows.
For example within a database, an SQL query from a relational database connects a subset of the database entries to the output (\emph{why-} provenance), their more detailed dependency (\emph{how-} provenance) and the precise location of the input sources (\emph{where-} provenance), for more see \citet{cheney09}.
In scientific workflows, provenance goes beyond a single database and its datasets: it may include many databases that aren't directly linked, the higher-level project-specific analysis that is done on the data, and the linking of the analysis to the text of the paper; for example see \citet{bavoil05, moreau08, malik13}.

Here, we define provenance to be the common factor of the usages above: a dataset's provenance is the set of metadata (in any ontology, standard or structure) that connect it to the components (other datasets or scripts) that produced it.
Data provenance thus provides a high-level view of the data's genealogy.

\subsection{Definition: data lineage}
\label{definition:lineage}

% This definition is inspired from https://stackoverflow.com/questions/43383197/what-are-the-differences-between-data-lineage-and-data-provenance:

% "data provenance includes only high level view of the system for business users, so they can roughly navigate where their data come from.
% It's provided by variety of modeling tools or just simple custom tables and charts.
% Data lineage is a more specific term and includes two sides - business (data) lineage and technical (data) lineage.
% Business lineage pictures data flows on a business-term level and it's provided by solutions like Collibra, Alation and many others.
% Technical data lineage is created from actual technical metadata and tracks data flows on the lowest level - actual tables, scripts and statements.
% Technical data lineage is being provided by solutions such as MANTA or Informatica Metadata Manager. "
Data lineage is commonly used interchangeably with Data provenance \citep[for example][\tonote{among many others, just search ``data lineage'' in scholar.google.com}]{cheney09}.
However, for clarity, in this paper we refer to the term ``Data lineage'' as a low-level and fine-grained recording of the data's source, and operations that occur on it, down to the exact command that produced each intermediate step.
This \emph{recording} does not necessarily have to be in a formal metadata model.
But data lineage must be complete (see completeness principle in Section \ref{principle:complete}), and allow extraction of data provenance metadata, and thus higher-level operations like visualization of the workflow.


\subsection{Definition: reproducibility and replicability}
\label{definition:reproduction}
These terms have been used in the literature with various meanings, sometimes in a contradictory way.
It is therefore necessary to clarify the precise usage of these terms in this paper.
But before that, it is important to highlight that in this paper we are only considering computational analysis; in other words, analysis after the data have been collected and stored as files on a filesystem.
Therefore, many of the definitions reviewed in \citet{plesser18}, that are about data collection, are out of context here.
We adopt the same definition of \citet{leek17,fineberg19}, among others:

%% From Zahra Sharbaf:
%% According to a U.S. National Science Foundation (NSF), the definition of reproducibility is “reproducibility refers to the ability of a researcher to duplicate the results of a prior study using the same materials as were used by the original investigator.
%% That is, a second researcher might use the same raw data to build the same analysis files and implement the same statistical analysis in an attempt to yield the same results….
%% Reproducibility is a minimum necessary condition for a finding to be believable and informative.”(K. Bollen, J. T. Cacioppo, R. Kaplan, J. Krosnick, J. L. Olds, Social, Behavioral, and Economic Sciences Perspectives on Robust and Reliable Science (National Science Foundation, Arlington, VA, 2015)).

\begin{itemize}
\item {\bf\small Reproducibility:} (same inputs $\rightarrow$ consistent result).
  Formally: ``obtaining consistent [not necessarily identical] results using the same input data; computational steps, methods, and code; and conditions of analysis'' \citep{fineberg19}.
  This is thus synonymous with ``computational reproducibility''.

  \citet{fineberg19} allow non-bitwise or non-identical numeric outputs within their definition of reproducibility, but they also acknowledge that this flexibility can lead to complexities: what is an acceptable non-identical reproduction?
  Exactly reproducible outputs can be precisely and automatically verified without statistical interpretations, even in a very complex analysis (involving many CPU cores, and random operations), see Section \ref{principle:verify}.
  It also requires no expertise, as \citet{claerbout1992} put it: ``a clerk can do it''.
  \tonote{Raul: I don't know if this is true... at least it needs a bit of training and an extra time. Maybe remove last phrase?}
  In this paper, unless otherwise mentioned, we only consider bitwise/exact reproducibility.

\item {\bf\small Replicability:} (different inputs $\rightarrow$ consistent result).
  Formally: ``obtaining consistent results across studies aimed at answering the same scientific question, each of which has obtained its own data'' \citep{fineberg19}.

Generally, since replicability involves new data collection, it can be expensive.
For example the ``Reproducibility Project: Cancer Biology'' initiative started in 2013 to replicate 50 high-impact papers in cancer biology\footnote{\url{https://elifesciences.org/collections/9b1e83d1/reproducibility-project-cancer-biology}}.
Even with funding of at least \$1.3 million, it later shrank to 18 projects \citep{kaiser18} due to very high costs.
We also note that replicability doesn't have to be limited to different input data: using the same data, but with different implementations of methods, is also a replication attempt \citep[also known as ``in silico'' experiments, see][]{stevens03}.
\end{itemize}

\tonote{Raul: put white line to separate next paragraph from the previous list?}
Some authors have defined these terms in the opposite manner.
Examples include \citet{hinsen15} and the policy guidelines of the Association of Computing Machinery\footnote{\url{https://www.acm.org/publications/policies/artifact-review-badging}} (ACM, dated April 2018).
ACM has itself adopted the 2008 definitions of Vocabulaire international de m\'etrologie (VIM).

Besides the two terms above, ``repeatability'' is also sometimes used in regards to the concept discussed here and must be clarified.
For example, \citet{ioannidis2009} use ``repeatability'' to encompass both the terms above.
However, the ACM/VIM definition for repeatability is ``a researcher can reliably repeat her own computation''.
Hence, in the ACM terminology, the only difference between replicability and repeatability is the ``team'' that is conducting the computation.
In the context of this paper, inputs are precisely defined (Section \ref{definition:input}): files with specific/registered checksums (see Section \ref{principle:verify}).
Therefore our inputs are team-agnostic, allowing us to safely ignore ``repeatability'' as defined by ACM/VIM.




















\section{Principles of the proposed solution}
\label{sec:principles}

The core principle behind this solution is simple: science is defined by its method, not its result.
Statements that convey a ``result'' abound in all aspects of human life (e.g., in fiction, religion and science).
What distinguishes one from the other is the ``method'' by which the result was derived.
Science is the only class that attempts to be as objective as possible through the ``scientific method''.
\citet{buckheit1995} nicely summarize this by pointing out that modern scientific papers (narrative combined with plots, tables and figures) are merely advertisements of the scholarship; the actual scholarship is the scripts and software usage that went into doing the analysis.

This paper thus proposes a framework that is optimally designed for both designing and executing a project, \emph{as well as} publication of the (computational) methods along with the published paper/result.
However, this paper is not the first attempted solution to this fundamental problem.
Various solutions have been proposed since the early 1990s, see Appendix \ref{appendix:existingsolutions} for a review.
To better highlight the differences with those methods, and the foundations of this method (which help in understanding certain implementation choices), in the sub-sections below, the core principle above is expanded by breaking it into logically independent sub-components.

It is important to note that, based on the definition of a project (Section \ref{definition:project}) and the first principle below (modularity, Section \ref{principle:modularity}), the proposed solution is designed to be modular and thus agnostic to high-level choices.
For example the choice of hardware (e.g., high performance computing facility or a personal computer), or high-level interfaces (for example a webpage or specialized graphic user interface).
The proposed solution in this paper is a low-level skeleton that is designed to be easily adapted to any high-level, project-specific, choice.
For example, in terms of hardware choice, a large simulation project simply cannot be run on smaller machines.
However, when such a project is managed in the proposed system, the complete project (see Section \ref{principle:complete}) is published and readable by peers, who can be sure that what they are reading contains the full/exact environment and commands that produced the result.
In terms of interfaces, wrappers can be written over this core skeleton for various other high-level cosmetics, for example a web interface, a graphic user interface or plugins to text editors or notebooks (see Appendix \ref{appendix:editors}).





\subsection{Principle: Complete/Self-contained}
\label{principle:complete}
A project should be self-contained, needing no particular features from the host operating system (OS), and not affecting the host OS.
At build-time (when the project is building its necessary tools), the project shouldn't need anything beyond a minimal POSIX environment on the host, which is available in Unix-like operating systems such as GNU/Linux, the BSDs, or macOS.
At run-time (once its environment/software have been built), it should not use or affect any host operating system programs or libraries.

Generally, a project's source should include the whole project: access to the inputs (see Section \ref{sec:definitions}), building necessary software (access to tarballs and instructions on configuring, building and installing those software), doing the analysis (run the software on the data) and creating the final narrative report/paper in its final format.
This principle has several important consequences:

\begin{itemize}
\item A complete project doesn't need any privileged/root permissions for system-wide installation, or environment preparations.
  Even when the user does have root privileges, interfering with the host operating system for a project may lead to many conflicts with the host or other projects.
  This principle thus allows a safe execution of the project, and will not cause any security problems.

\item A complete project doesn't need an internet connection to build itself or to do its analysis and possibly make a report.
  Of course this only holds when the analysis doesn't inherently require internet, for example needing a live data feed.

\item A complete project inherently includes the complete data lineage and provenance: automatically enabling a full backtrace of the output datasets or narrative, to raw inputs: data or software source code lines.
  This is very important because many existing data provenance solutions require manual tagging within the data workflow or connecting the data with the paper's text (Appendix \ref{appendix:existingsolutions}).
  Manual tagging can be highly subjective, prone to many errors, and incomplete.

\item A complete project will not need any user interaction and can complete itself automatically.
  This is because manual interaction is a form of incompleteness.
  Interactivity is also an inherently irreproducible operation, exposing the analysis to human error, and requiring expert knowledge.
\end{itemize}

The first two components are particularly important for high performance computing (HPC) facilities: because of security reasons, HPC users commonly don't have privileged permissions or internet access.

A complete project as defined here is much less exposed to ``workflow decay'' as defined by \citet{zhao12} (in particular under their missing execution environment tests).
As recommended by \citet{zhao12}, a complete project automatically builds all its necessary third-party tools, it doesn't just assume their existence.
Ultimately, the executability of a project will decay once the host Linux kernel inevitably evolves such that the project's fixed versions of the GNU C Library and GNU C Compiler can no longer be built.
This will happen on much longer time scales than for the high-level software mentioned in \citet{zhao12} and can be fixed by changing the project's (GNU) C Library and (GNU) C Compiler to versions that are buildable with the host kernel.
These are very low-level components and any possible change in the output should be minimal.
Ultimately after multiple decades, even that may not be possible, but even at that point, thanks to the plain-text principle (Section \ref{principle:text}), it can still be studied, without necessarily executing it.





\subsection{Principle: Modularity}
\label{principle:modularity}
A project should be compartmentalized or partitioned into independent modules or components with well-defined inputs/outputs and no side-effects.
In a modular project, communication between the independent modules is explicit, providing optimizations on multiple levels:
1) Execution: independent modules can run in parallel, or modules that don't need to be run (because their dependencies haven't changed) won't be re-done.
2) Data lineage and data provenance extraction (recording any dataset's origins).
3) Citation: allowing others to credit specific parts of a project.
This principle doesn't just apply to the analysis, it also applies to the whole project, for example see the definitions of ``input'', ``output'' and ``project'' in Section \ref{sec:definitions}.

Within the analysis phase, this principle can be summarized best with the Unix philosophy, best described by \citet{mcilroy78} in the ``Style'' section.
In particular ``Make each program do one thing well.
To do a new job, build afresh rather than complicate old programs by adding new `features'''.
Independent parts of the analysis can be maintained as independent software (for example shell, Python, or R scripts, or programs written in C, C++ or FORTRAN, among others).
This core aspect of the Unix philosophy has been the cause of its continued success (particularly through GNU and BSD) and development in the last half century.

For the most high-level analysis/operations, the boundary between the ``analysis'' and ``project'' can become blurry.
It is thus inevitable that some highly project-specific, and small, analysis steps are also kept within the project and not maintained as a separate software package (that is built before the project is run).
This isn't a problem, because inputs are defined as files that are \emph{usable} by other projects (see Section \ref{definition:input}).
If necessary, such highly project-specific software can later be spun off into a separate software package.
One example of an existing system that doesn't follow this principle is Madagascar, which builds a large number of analysis programs as part of the project (see Appendix \ref{appendix:madagascar}).

%\tonote{Find a place to put this:}   Note that input files are a subset of built files: they are imported/built (copied or downloaded) using the project's instructions, from an external location/repository.
%  This principle is inspired by a similar concept in the free and open source software building tools like the GNU Build system (the standard `\texttt{./configure}', `\texttt{make}' and `\texttt{make install}'), or CMake.
%  Software developers already have decades of experience with the problems of mixing hand-written source files with the automatically generated (built) files.
%  This principle has proved to be an exceptionally useful in this model, greatly



\subsection{Principle: Plain text}
\label{principle:text}
A project's primary stored/archived format should be plain text with a human-readable encoding\footnote{Plain text format doesn't include document container formats like \inlinecode{.odf} or \inlinecode{.doc}, for software like LibreOffice or Microsoft Office.}, for example ASCII or Unicode (for the definition of a project, see Section \ref{definition:project}).
The reason behind this principle is that opening, reading, or editing non-plain text (executable or binary) file formats needs specialized software.
Binary formats will complicate various aspects of the project: its usage, archival, automatic parsing, or human readability.
This is a critical principle for long-term preservation and portability: when the software to read a binary format has been deprecated or has become obsolete and isn't installable on the running system, the project will not be readable/usable any more. % should replace `installable`?

A project that is solely in plain text format can be put under version control as it evolves, with easy tracking of changed parts, using already available and mature tools in software development: software source code is also in plain text.
After publication, independent modules of a plain-text project can be used and cited through services like Software Heritage \citep{dicosmo18,dicosmo20}, enabling future projects to easily build on top of old ones, or cite specific parts of a project.

Archiving a binary version of the project is like archiving a well cooked dish itself, which will be inedible with changes in hardware (temperature, humidity, and the natural world in general).
Archiving the dish's recipe (which is also in plain text!), on the other hand, means you can re-cook it at any time.
When the environment is under perfect control (as in the proposed system), the binary/executable, or re-cooked, output will be verifiably identical. % should replace `verifiably` with another word?
One illustrative example of the importance of source code is mentioned in \citet{smart18}: a seven-year old dispute between condensed matter scientists could only be solved when they shared the plain text source of their respective projects.

This principle doesn't conflict with having an executable or immediately-runnable project\footnote{In their recommendation 4-1 on reproducibility, \citet{fineberg19} mention: ``a detailed description of the study methods (ideally in executable form)''.}. % should replace `runnable`?
This is because it is trivial to build a text-based project within an executable container or virtual machine.
For more on containers, please see Appendix \ref{appendix:independentenvironment}.
To help contemporary researchers, this built/executable form of the project can be published as an output in respective servers like \url{http://hub.docker.com} (see Section \ref{definition:output}).

Note that this principle applies to the whole project, not just the initial phase.
Therefore a project like Conda that currently includes a $+500$MB binary blob in a plain-text shell script (see Appendix \ref{appendix:conda}) is not acceptable for this principle. % is it `Anaconda` or `Conda` project?
This failure also applies to projects that build tools to read binary sources.
In short, the full source of a project should be in plain text.





\subsection{Principle: Minimal complexity (i.e., maximal compatibility)}
\label{principle:complexity}
An important measure of the quality of a project is how much it avoids complexity.
In principle this is similar to Occam's razor: ``Never posit pluralities without necessity'' \citep{schaffer15}, but extrapolated to project management.
In this context, Occam's razor can be interpreted in ways like the following:
minimize the number of a project's software dependencies (there are often multiple ways of doing something),
avoid complex relations between analysis steps (which is not unrelated to the principle of modularity in Section \ref{principle:modularity}),
or avoid the programming language that is currently in vogue, because it is going to fall out of fashion soon and take the project down with it (see Appendix \ref{appendix:highlevelinworkflow}).
This principle has several important consequences:
\begin{itemize}
\item Easier learning curve.
Scientists can't adopt new tools and methods as fast as software developers.
They have to invest the majority of their time on their own research domain.
Because of this, researchers usually continue their careers with the language/tools they learned when they started.

\item Future usage.
Scientific projects require longevity: unlike software engineering, there is no end-of-life in science (e.g., Aristotle's work 2.5 millennia ago is still ``science'').
Scientific projects that depend too much on an ever evolving, high-level software developing toolchain, will be harder to archive, run, or even study for their immediate and future peers.
One recent example is the Popper software implementation: it was originally designed in the HashiCorp configuration language (HCL) because it was the default for organizing operations in GitHub. % should names like HashiCorp be formatted in italics?
However, GitHub dropped HCL in October 2019, for more see Appendix \ref{appendix:popper}.

\item Compatible and extensible.
  A project that has minimal complexity can easily adapt to any kind of data, programming language, host hardware or software, etc.
  It can also be easily extended for new inputs and environments.
  For example when a project management system is designed only to manage Python functions (like CGAT-core, see Appendix \ref{appendix:jobmanagement}), it will be hard, inefficient and buggy for managing an analysis step that is written in R and another written in FORTRAN.
\end{itemize}





\subsection{Principle: Verifiable inputs and outputs}
\label{principle:verify}
The project should contain automatic verification checks on its inputs (software source code and data) and outputs.
When applied, expert knowledge won't be necessary to confirm the correct reproduction.
It is important to emphasize, however, that in practice exact or bit-wise reproduction is very hard to implement at the level of a file.
This is because many specialized scientific software packages commonly print the running date in their output files (which is very useful in its own context).

For example, in plain-text tables, such metadata are commonly printed as commented lines (usually starting with \inlinecode{\#}).
Therefore, when verifying such a plain-text table, the checksum that is used to validate the data can be computed after removing all commented lines.
Fortunately, the tools to operate on specialized data formats also usually have ways to remove requested metadata (like creation date), or ignore metadata altogether.
For example the FITS standard in astronomy \citep{pence10} defines a special \inlinecode{DATASUM} keyword which is a checksum calculated only from the raw data, ignoring all metadata.
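
As a minimal sketch (the file name and checksum tool below are only illustrative), such a verification can be done with standard command-line tools by stripping the commented metadata lines before computing the checksum:

\begin{verbatim}
# Verify a plain-text table while ignoring commented metadata
# lines (e.g., creation dates); 'out/table.txt' is hypothetical.
grep -v '^#' out/table.txt | sha256sum
\end{verbatim}

\noindent The resulting checksum can then be compared with the value recorded in the project source to automatically confirm an exact reproduction of that file.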





\subsection{Principle: History and temporal provenance (version control)}
\label{principle:history}
No project is done in a single/first attempt.
Projects evolve as they are being completed.
It is natural that earlier phases of a project are redesigned/optimized only after later phases have been completed.
This is often seen in scientific papers, with statements like ``we [first] tried method [or parameter] XXXX, but YYYY is used here because it was shown to have better precision [or less bias, etc.]''.
A project's ``history'' is thus as scientifically relevant as the final, or published, snapshot of the project.
All the outputs (datasets or narrative papers) need to contain the exact point in the project's history that produced them.

For a complete project (see Section \ref{principle:complete}) that is under version control (like Git), this would be the unique commit checksum (for more on version control, see Appendix \ref{appendix:versioncontrol}).
This principle thus benefits from the plain-text principle (Section \ref{principle:text}).
Note that with our definition of a project (Section \ref{definition:project}), ``changes'' in the project include changes in the software building or versions, changes in the running environment, changes in the analysis, or changes in the narrative.
After publication, the project's history can also be published on services like Software Heritage \citep{dicosmo18}, enabling precise citation and archival of all stages of the project's evolution.
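
As a minimal sketch of how this can work in practice (the exact mechanism is not prescribed here), the unique commit checksum can be queried from Git itself and embedded into the outputs at build time:

\begin{verbatim}
# Report the commit that produced this build; '--dirty' flags
# any uncommitted changes in the working tree.
git describe --always --dirty
\end{verbatim}

\noindent Embedding this string in every output dataset and in the final report ties each of them to the exact point in the project's history that produced them.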

Taking this principle to a higher level, newer projects are built upon the shoulders of previous projects.
A project management system should be able to provide this temporal connection between projects.
Quantifying how newer projects relate to older projects (for example through Git branches) will enable 1) scientists to simply use the relevant parts of an older project, 2) quantify the connections of various projects, which is primarily of interest for meta-research (research on research) or historical studies.
In data science, ``provenance'' is used to track the analysis and original datasets that were used in producing a higher-level dataset.
A system that uses this principle will also provide ``temporal provenance'', quantifying how a certain project grew/evolved in the time dimension.





\subsection{Principle: Free and open source software}
\label{principle:freesoftware}
Technically, as defined in Section \ref{definition:reproduction}, reproducibility is also possible with non-free and non-open-source software (a black box).
This principle is thus necessary to complement the definition of reproducibility.
This is because software freedom is an important pillar of the sciences, as shown below:
\begin{itemize}
\item Based on the completeness principle (Section \ref{principle:complete}), it is possible to trace the output's provenance back to the exact source code lines within an analysis software.
  If the software's source code isn't available, such important and useful provenance information is lost.
\item Non-free software may not be executable on a given hardware.
  Since free software is modifiable, others can modify (or hire someone to modify) it and make it executable on their particular platform.
\item Non-free software cannot be distributed by the project authors, making the whole community reliant only on the proprietary owner's server (even if the proprietary software doesn't ask for payments).
  A project that uses free software can also release the necessary tarballs of the software it uses.
  For example see \href{https://doi.org/10.5281/zenodo.3408481}{zenodo.3408481} \citep{akhlaghi19} or \href{https://doi.org/10.5281/zenodo.3524937}{zenodo.3524937} \citep{infante20}.
\item A core component of reproducibility is that anonymous peers should be able to confirm the result from the same datasets with minimal effort, and this includes financial costs beyond the hardware.
\end{itemize}




















\section{Implementation of Maneage}
\label{sec:maneage}

The proposed solution is an implementation of the principles discussed in Section \ref{sec:principles}: it is complete and automatic (Section \ref{principle:complete}), modular (Section \ref{principle:modularity}), fully in plain text (Section \ref{principle:text}), having minimal complexity (see Section \ref{principle:complexity}), with automatically verifiable inputs \& outputs (Section \ref{principle:verify}), preserving temporal provenance, or project evolution (Section \ref{principle:history}) and finally, it is free software (Section \ref{principle:freesoftware}).

In practice, it is a collection of plain-text files that are distributed in pre-defined sub-directories by context, and are all under version control (currently with Git).
In its raw form (before customizing for different projects), it is a fully working skeleton of a project without much flesh: containing all the low-level infrastructure, with just a small demonstrative ``delete-me'' analysis.
To start a new project, users will \emph{clone}\footnote{In Git, cloning is the process of copying all the project's files and their history into the host system.} the core skeleton, create their own Git branch, and start customizing the core files (adding their high-level analysis steps, scripts to generate figures, and narrative) within their custom branch.

In this section we will review the current implementation of the reproducible paper template.
Generally, job orchestration is implemented in Make (a POSIX tool); this choice is elaborated in Section \ref{sec:usingmake}.
We continue with a general outline of the project's file structure in Section \ref{sec:generalimplementation}.
As described there, we make a cosmetic distinction between ``configuration'' (or building of necessary software) and execution (or running the software on the data); these two phases are discussed in Sections \ref{sec:projectconfigure} \& \ref{sec:projectmake}.


\subsection{Job orchestration with Make}
\label{sec:usingmake}
When non-interactive, or batch, processing is needed (see Section \ref{principle:complete}), shell scripts are usually the first solution that come to mind (see Appendix \ref{appendix:scripts}).
However, the inherent complexity and non-linearity of progress in a scientific project (where experimentation is key) makes it hard and inefficient to manage the script(s) as the project evolves.
For example, a script will be run from the top every time it is executed.
Therefore, even if $90\%$ of a research project is complete and only the newly added, final $10\%$ needs to be executed, a script will always start from the beginning.

It is possible to manually ignore (with conditionals), or comment out, parts of a script in order to run only a specific part.
However, such conditionals/comments will only add to the complexity and will discourage experimentation on an already completed part of the project.
This is also prone to very serious bugs in the end (e.g., due to human error, some parts may be left out or not up to date) when re-running from scratch.
Such bugs are very hard to notice during the work and frustrating to find in the end.
These problems motivated the creation of Make in the early Unix operating system \citep{feldman79}.

In the Make paradigm, process execution starts from the end: the final \emph{target}.
Through the Make syntax, the user specifies the \emph{prerequisite}(s) of each target and a \emph{recipe} (a small shell script) to create the target from the prerequisites (for more see Appendix \ref{appendix:make}).
With this lineage, Make is thus able to build a dependency tree internally and find the rule(s) that need to be executed on each run.
This has many advantages:
\begin{itemize}
\item \textbf{\small Only executing necessary steps:} in the scenario above, a researcher who has just added the final $10\%$ of her research will only have to run those extra steps, without any modification to the previous parts.
  With Make, it is also trivial to change the processing of any intermediate \emph{rule} (or step) in the middle of an already written analysis: the next time Make is run, only rules that are affected by the changes/additions will be re-run, not the whole analysis/project.

Most importantly, this enables full reproducibility from scratch with no changes in the project code that was working during the research.
This allows robust results and lets scientists do what they do best: experiment and be critical of the methods/analysis, without having to waste energy on the added complexity of experimentation in scripts.

\item \textbf{\small Parallel processing:} Since the dependencies are clearly demarcated in Make, it can identify independent steps and run them in parallel.
  This greatly speeds up the processing, with no cost in terms of complexity.

\item \textbf{\small Codifying data lineage and provenance:} In many systems data provenance has to be manually added.
  However, in Make, it is part of the design and no extra manual step is necessary to fully track (or back-track) the series of steps that generated the data.
\end{itemize}
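
To make the paradigm described above more concrete, the following is a minimal sketch of a Makefile with two rules; all file names and commands here are hypothetical and are not part of the template.
On each call, Make compares file timestamps and only runs the recipes whose targets are missing or older than their prerequisites; if nothing has changed, Make does nothing.

\begin{lstlisting}[language=make]
  # Minimal sketch of two Make rules (hypothetical names/commands).
  # A rule is 'target: prerequisites' followed by recipe lines, and
  # every recipe line must start with a TAB character.
  out.dat: input.dat params.conf
          ./process input.dat params.conf > out.dat

  stats.txt: out.dat
          ./summarize out.dat > stats.txt
\end{lstlisting}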

Make has been a fixed component of POSIX (or Unix-like operating systems including Unix, GNU/Linux, BSD, and macOS, among others) since the very early days of Unix, almost 40 years ago.
It is therefore, by far, the most reliable, commonly used, well-known and well-maintained workflow manager today.
Because the core operating system components are built with it, Make is expected to keep this unique position into the foreseeable future.
Make is also well known by many outside of the software development community.
For example \citet{schwab2000} report how geophysics students have easily adopted it for the RED project management tool used in their lab at that time (see Appendix \ref{appendix:red} for more on RED).
Because of its simplicity, we have also had very good feedback on using Make from the early adopters of this system during the last year, in particular graduate students and postdocs.

In summary, Make satisfies all our principles (see Section \ref{sec:principles}), while avoiding the well-known problems of using high-level languages for project management, like the generational gap and ``dependency hell'' (see Appendix \ref{appendix:highlevelinworkflow}).
For more on Make and a discussion on some other job orchestration tools, see Appendices \ref{appendix:make} and \ref{appendix:jobmanagement} respectively.





\subsection{General implementation structure}
\label{sec:generalimplementation}

As described above, a project using this template is a combination of plain-text files that are organized in various directories by context.
Figure \ref{fig:files} shows this directory structure and some representative files in each directory.
The top-level source only has two main directories: \inlinecode{tex/} (containing \LaTeX{} files) and \inlinecode{reproduce/} (containing all other parts of the project) as well as several high-level files.
Most of the top project directory files are only intended for human readers (as narrative text, not scripts or programming sources):
\inlinecode{COPYING} is the project's high-level copyright license,
\inlinecode{README.md} is a basic introduction to the specific project, and
\inlinecode{README-hacking.md} describes how to customize, or hack, the template for creators of new projects.

In the top project directory, there are two non-narrative files: \inlinecode{project} (which would otherwise belong under \inlinecode{reproduce/}) and \inlinecode{paper.tex} (which would otherwise belong under \inlinecode{tex/}).
The former is necessary in the top project directory because it is the high-level user interface, with the \inlinecode{./project} command.
The latter is necessary for many web-based automatic paper generating systems like arXiv, journals, or systems like Overleaf.

\begin{figure}[t]
  \begin{center}
    \includetikz{figure-file-architecture}
  \end{center}
  \vspace{-5mm}
  \caption{\label{fig:files}
    Directory and file structure in a hypothetical project using this solution.
    Files are shown with small, green boxes that have a suffix in their names (for example \inlinecode{format.mk} or \inlinecode{download.tex}).
    Directories (containing multiple files) are shown as large, brown boxes, where the name ends in a slash (\inlinecode{/}).
    Directories with dashed lines and no files (just a description) are symbolic links that are created after building the project, pointing to commonly needed built directories.
    Symbolic links and their contents are not considered part of the source and are not under version control.
    Files and directories are shown within their parent directory.
    For example the full address of \inlinecode{format.mk} from the top project directory is \inlinecode{reproduce/analysis/make/format.mk}.
  }
\end{figure}

\inlinecode{project} is a simple, executable, POSIX-compliant shell script that is just a high-level wrapper to call the project's Makefiles.
Recall that the main job orchestrator in this system is Make, see Section \ref{sec:usingmake} for why Make was chosen.
In the current implementation, the project's execution consists of the following two calls to the \inlinecode{project} script:

\begin{lstlisting}[language=bash]
  ./project configure      # Build software from source (takes around 2 hours for full build).
  ./project make           # Do the analysis (download data, run software on data, build PDF).
\end{lstlisting}

The operations of both are managed by files under the top-level \inlinecode{reproduce/} directory.
When the first command is called, the contents of \inlinecode{reproduce\-/software} are used, while the second calls files under \inlinecode{reproduce\-/analysis}.
This highlights the \emph{cosmetic} distinction we have adopted between the two main steps of a project: 1) building the project's full software environment and 2) doing the analysis (running the software).
Technically there is no difference between the two and they could easily be merged under one directory.
However, during a research project, researchers commonly just need to focus on their analysis steps and will rarely need to edit the software environment settings (maybe only once at the start of the project).
Therefore, having the files mixed under the same directory can be confusing.

In summary, the same structure governs both aspects of a project: software building and analysis.
This is an important and unique feature in this template.
A researcher who has become familiar with Makefiles for orchestrating their analysis will also easily be able to modify the Makefiles of the software that is built in their project, and thus feel free to customize their project's software as well.
Most other systems use third-party package managers for their project's software, thus discouraging project-specific customization of software; for a full review of third-party package managers, see Appendix \ref{appendix:packagemanagement}.





\subsection{Project configuration}
\label{sec:projectconfigure}
A critical component of any project is the set of software used to do the analysis.
However, verifying an already built software environment (which is critical to reproducing the research result) is very hard.
This has forced most projects to move around the whole \emph{built} software environment (a black box) as virtual machines or containers, see Appendix \ref{appendix:independentenvironment}.
Because these black boxes are almost impossible to reproduce themselves, they need to be archived, even though they can take gigabytes of space.
Package managers like Nix or GNU Guix do provide a verifiable, i.e., reproducible, software building environment, but because they aim to be generic package managers, they have their own limitations on a project-specific level, see Appendix \ref{appendix:nixguix}.

Based on the principles of completeness and minimal complexity (Sections \ref{principle:complete} \& \ref{principle:complexity}), a project that uses this solution also contains the full instructions to build its necessary software, in the same language that the analysis is orchestrated in: Make.
Project configuration (building software environment) is managed by the files under \inlinecode{reproduce\-/software}.
Project configuration involves three high-level steps which are discussed in the subsections below: setting the local directories (Section \ref{sec:localdirs}), checking for a working C compiler (Section \ref{sec:ccompiler}), and downloading, building and installing the software from source (Section \ref{sec:buildsoftware}).





\subsubsection{Setting local directories}
\label{sec:localdirs}
All files built by the project (software or analysis) will be under a ``build directory'' (or \inlinecode{BDIR}) on the host filesystem.
No other location on the running operating system will be affected by the project.
Following the modularity principle (Section \ref{principle:modularity}), this directory should be separate from the source directory.
Therefore, at configuration time, the first thing to specify is the build directory on the running system.
The build directory can be specified in two ways: 1) on the command line with the \inlinecode{--build-dir} option, or 2) interactively: if it isn't given on the command line, the configuration will stop with a prompt and some explanation.

Two other local directories can optionally be specified by the project when inputs are present locally (for the definition of inputs, see Section \ref{definition:input}) and don't need to be downloaded: 1) software tarball directory and 2) input data directory.
The project only needs read permission on these directories: when given, nothing will be written inside them.
The project will only look into them for the necessary software tarballs and input data.
If they are not found, the project will attempt to download any necessary file from the recorded URLs/PIDs within the project source.
These directories are therefore primarily tailored to scenarios where the project must run offline (based on the completeness principle of Section \ref{principle:complete}).
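
For example, a configuration call specifying all three directories could look like the following; the paths are arbitrary examples, \inlinecode{--build-dir} is the option described above, while the names of the two optional directory options are only illustrative here:

\begin{lstlisting}[language=bash]
  # '--build-dir' is described above; the other two option names
  # are illustrative placeholders for the optional directories.
  ./project configure --build-dir=/path/to/build \
                      --software-dir=/path/to/software/tarballs \
                      --input-dir=/path/to/input/data
\end{lstlisting}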

After project configuration, a symbolic link is built in the top project source directory that points to the build directory.
The symbolic link is a hidden file named \inlinecode{.build}, see Figure \ref{fig:files}.
With this symbolic link, it is always easy to access the built files, no matter where the build directory is actually located on the filesystem.





\subsubsection{Checking for a C compiler}
\label{sec:ccompiler}
This template builds all its necessary software internally to avoid dependency issues with various software versions on different hosts.
A working C compiler is thus mandatory and the configure script will abort if a working C compiler isn't found.
In particular, on GNU/Linux systems, the project builds its own version of the GNU Compiler Collection (GCC), therefore a static C library is necessary with the compiler.
If not found, an informative error message will be printed and the project will abort.

The custom version of GCC is configured to also build Fortran, C++, Objective-C and Objective-C++ compilers.
The Python and R running environments are themselves written in C; therefore they are also automatically built afterwards if the project uses these languages.
On macOS systems, we currently don't build a C compiler, but it is planned to do so in the future.





\subsubsection{Verifying and building necessary software from source}
\label{sec:buildsoftware}

All necessary software for the project, and their dependencies, are installed from source.
Researchers using the template only have to specify the most high-level analysis software they need in \inlinecode{reproduce\-/software\-/config\-/installation\-/TARGETS.conf} (see Figure \ref{fig:files}).
Based on the completeness principle (Section \ref{principle:complete}), on GNU/Linux systems the dependency tree is automatically traced down to the GNU C Library and GNU Compiler Collection (GCC), thus creating identical high-level analysis software on any system.
When the C library and compiler can't be installed (for example on macOS systems), the users are forced to rely on the host's C compiler and library, and this may hamper the exact reproducibility of the final result: the project will abort if the final outputs have changed.
Because the project's main output is currently a \LaTeX{}-built PDF, the project also contains an internal installation of \TeX{}Live, providing all the necessary tools to build the PDF, independent of the host operating system's \LaTeX{} version and packages.

To build the software from source, the project needs access to its source tarball or zip-file.
If the tarballs are already present on the system, the user can specify the respective directory at the start of project configuration (Section \ref{sec:localdirs}).
If not, the software tarballs will be downloaded from pre-defined servers.
Ultimately, the origin of the tarballs is irrelevant for this project; what matters is the tarball's contents, which are checked through the SHA-512 checksum \citep[part of the SHA-2 algorithms, see][]{romine15}.
If the SHA-512 checksum of the tarball is different from the checksum stored for it in the project's source, the project will complain and abort.
Because the server is irrelevant, one planned task\tonote{add task number} is to allow users to identify the most convenient server themselves, for example to improve download speed.
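
For example, a downloaded tarball can also be checked manually with the GNU Coreutils \inlinecode{sha512sum} program (the tarball name below is only an example):

\begin{lstlisting}[language=bash]
  # The printed checksum must match the one recorded in the project source.
  sha512sum software-1.0.tar.gz
\end{lstlisting}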

Software tarball access, unpacking, building and installation is managed through Makefiles, see Sections \ref{sec:usingmake} \& \ref{sec:generalimplementation}.
The project's software are classified into two classes: 1) basic and 2) high-level.
The former contains meta-software: software needed to build other software, for example GNU Gzip, GNU Tar, GNU Make, GNU Bash, GNU Coreutils, GNU SED, GNU Binutils, the GNU Compiler Collection (GCC) and so on\footnote{Note that almost all of these GNU software are also installable on non-GNU/Linux operating systems like BSD or macOS; exceptions include GNU Binutils.}.
The basic software are built with the host operating system's tools and are installed with any project.
The high-level software are those that are used directly in the science analysis and can differ from project to project.
However, because the basic software have already been built by the project, the higher-level software are built with them, independent of the host operating system's tools.

Software building is managed by two top-level Makefiles that follow the same classification.
Both are under the \inlinecode{reproduce\-/software\-/make/} directory (Figure \ref{fig:files}): \inlinecode{basic.mk} and \inlinecode{high-level.mk}.
Because \inlinecode{basic.mk} can't assume anything about the host, it is written to comply with POSIX Make and POSIX shell, which are very limited compared to GNU Make and GNU Bash respectively.
However, after it is finished, a specific version of GNU Make (among other basic software) is present, enabling us to assume the much more advanced features of the GNU tools in \inlinecode{high-level.mk}.

The project's software are installed under \inlinecode{BDIR/software/installed}.
The \inlinecode{.local} symbolic link in the top project source directory points to it for easy access (see Figure \ref{fig:files}).
It contains the top-level POSIX filesystem hierarchy subdirectories for the project, including \inlinecode{bin/}, \inlinecode{lib/} and \inlinecode{include/}, among others.
For example the custom-built GNU Make executable is placed under \inlinecode{BDIR\-/software\-/installed\-/bin\-/make} or alternatively \inlinecode{.local/bin/make}.
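
For example, once configuration is complete, the project's own Make can be inspected through either path:

\begin{lstlisting}[language=bash]
  # Both point to the same, project-specific GNU Make build.
  .local/bin/make --version
  .build/software/installed/bin/make --version
\end{lstlisting}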

To orchestrate software building with Make, the building of each software should be represented as a file.
In the Makefiles, that file is used as a \emph{target} in the rule that builds the software, or as a \emph{prerequisite} in the rule(s) of any software that depends on it.
For more on Make, see Appendix \ref{appendix:make}.
Initially we tried using the actual software's built files (executable programs, libraries, etc.).
However, given the variety of installed files, using them as the software's representative caused many complexities, confusions and bugs.
Therefore, in the current system, once a software is built, a simple plain-text file is created under a sub-directory of \inlinecode{.local\-/version-info}.
The representative files for C/C++ programs or libraries are placed under the \inlinecode{proglib} sub-directory.
The Python or \TeX{}Live representative files are placed under the \inlinecode{python} and \inlinecode{tex} subdirectories respectively.
Make uses this file to refer to the software and arrange the order of software execution.
The contents of this plain-text file are the name and possible citation of the software, which are directly imported into the final paper in the end.
For more on software citation, see Section \ref{sec:softwarecitation}.



\subsubsection{Software citation}
\label{sec:softwarecitation}
Based on the completeness principle (Section \ref{principle:complete}), the project contains the full list of installed software, their versions and their configuration options.
However, this information is buried deep into the project's source.
A distilled fraction of this information must also be printed in the project's final report, blended into the narrative.
Furthermore, when a published paper is associated with the used software, it is important to cite that paper: citations help software authors gain more recognition and grants, encouraging them to further develop the software.
This is particularly important in the case of research software, where the researcher has invested significant time in building the software and requires official citation to justify continued work on it.

One notable example that nicely highlights this issue is GNU Parallel \citep{tange18}: every time it is run, it prints the citation information before it starts.
This doesn't cause any problem in automatic scripts, but can be annoying when reading/debugging the outputs.
Users can disable the notice with the \inlinecode{--citation} option by agreeing to cite its paper, or support its development directly by paying $10000$ euros!
This is justified by an uncomfortably true statement\footnote{GNU Parallel's FAQ on the need to cite software: \url{http://git.savannah.gnu.org/cgit/parallel.git/plain/doc/citation-notice-faq.txt}}: ``history has shown that researchers forget to [cite software] if they are not reminded explicitly. ... If you feel the benefit from using GNU Parallel is too small to warrant a citation, then prove that by simply using another tool''.
In bug 905674\footnote{Debian bug on the citation notice of GNU Parallel: \url{https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=905674}}, the Debian developers argued that because of this extra condition, GNU Parallel should not be considered as free software, and they are using a patch to remove that part of the code for its build under Debian-based operating systems.
Most other research software do not resort to such drastic measures; however, citation is just as important for them.
Given the increasing number of software used in scientific research, the only reliable solution is to automatically cite the used software in the final paper.

As mentioned above in Section \ref{sec:buildsoftware}, a plain-text file is built automatically at the end of a software's successful build and installation.
This file contains the name, version and possible citation of that software.
At the end of the configuration phase, all these plain-text files are merged into one \LaTeX{} macro that can be imported directly into the final paper or report.
In this paper, this macro's value is shown in Appendix \ref{appendix:softwareacknowledge}.
The paragraph produced by this macro won't be too large, but it will greatly help in the recording of the used software environment and will automatically cite the software where necessary.

In the current version of this template, it is assumed that the published report of a project is built with \LaTeX{}.
Therefore, every software that has an associated paper, has a Bib\TeX{} file under the \inlinecode{reproduce\-/software\-/bibtex} directory.
When the software is built for the project (possibly as a dependency of another software specified by the user), the Bib\TeX{} entry (or entries) is copied to the build directory and the command to cite that Bib\TeX{} record is included in the \LaTeX{} macro with the name and version of the software, as shown in Appendix \ref{appendix:softwareacknowledge}.

For a review of the necessity and basic elements in software citation, see \citet{katz14} and \citet{smith16}.
There are ongoing projects specifically tailored to software citation, including CodeMeta (\url{https://codemeta.github.io}) and Citation file format (CFF: \url{https://citation-file-format.github.io}).
Both are based on schema.org, but are implemented in JSON-LD and YAML respectively.
Another robust approach is provided by Software Heritage \citep{dicosmo18}.
A feature of Software Heritage is that a published paper isn't necessary, and it won't populate a research paper's bibliography list.
However, this also makes it hard to count as academic credit.
We are considering the use of these tools, exporting Bib\TeX{} entries when necessary.










\subsection{High-level organization of analysis}
\label{sec:highlevelanalysis}

Once a project is configured (Section \ref{sec:projectconfigure}), all the necessary software, with precise versions and configurations, are built and ready to use.
The analysis phase of the project (running the software on the data) is also orchestrated through Makefiles.
For the unique advantages of using Make to manage a research project, see Sections \ref{sec:usingmake} \& \ref{sec:generalimplementation}.
In order to best follow the principle of modularity (Section \ref{principle:modularity}), the analysis is not done in one phase or with a single Makefile.
Here, the two high-level phases of the analysis are reviewed.
The organization of the lower-level analysis, in many modular Makefiles, is discussed in Section \ref{sec:lowlevelanalysis}.

After running \inlinecode{./project make}, the analysis is done in two sequential phases: 1) preparation and 2) main analysis.
The purpose of the preparation phase is further elaborated in Section \ref{sec:prepare}.
Technically, these two phases are managed by the two high-level Makefiles: \inlinecode{top-prepare.mk} and \inlinecode{top-make.mk}.
Both are under \inlinecode{reproduce\-/analysis\-/make} (see Figure \ref{fig:files}) and both have an identical lower-level analysis structure.
But before that, in Section \ref{sec:analysisenvironment} the isolation of the analysis environment from the host is discussed.





\subsubsection{Isolated analysis environment}
\label{sec:analysisenvironment}
By default, the analysis part of the project is not exposed to any of the host's environment variables.
This is accomplished through the `\inlinecode{env -i}' command\footnote{Note that the project's self-built \inlinecode{env} program is used, not the one provided by the host operating system.
  Within the project, \inlinecode{env} is installed as part of GNU Coreutils and \inlinecode{-i} is short for \inlinecode{--ignore-environment}.}, which will remove the host environment.
The project will define its own values for standard environment variables to avoid using system or user defaults.
Combined with the fact that all the software were configured and compiled from source for each project at configuration time (Section \ref{sec:buildsoftware}), this completely isolates the analysis from the host operating system, creating an exactly reproducible result on any machine on which the project can be configured.

For example, the project builds its own fixed version of GNU Bash (a command-line shell environment).
It also has its own \inlinecode{bashrc} startup script\footnote{The project's Bash startup script is under \inlinecode{reproduce\-/software\-/bash\-/bashrc.sh}, see Figure \ref{fig:files}.}, and the \inlinecode{BASH\_ENV} environment variable is set to load this startup script.
Furthermore, the \inlinecode{HOME} environment variable is set to \inlinecode{BDIR} to avoid the penetration of any existing Bash startup file of the user's home directory into the analysis.
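
Conceptually, the analysis is therefore launched with a command like the one below (a simplified sketch, not the project's verbatim code; \inlinecode{\$bdir} and \inlinecode{\$topdir} are illustrative stand-ins for the build and top source directories):

\begin{lstlisting}[language=bash]
  # Conceptual sketch only: clear the host environment, set the project's
  # own HOME and Bash startup script, then run the project's own Make on
  # the top-level analysis Makefile.
  env -i HOME="$bdir" \
         BASH_ENV="$topdir"/reproduce/software/bash/bashrc.sh \
         "$bdir"/software/installed/bin/make \
         -f reproduce/analysis/make/top-make.mk
\end{lstlisting}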





\subsubsection{Preparation phase}
\label{sec:prepare}
When \inlinecode{./project make} is called, the first Makefile that is run is \inlinecode{top-prepare.mk}.
It is designed for any selection steps that may be necessary to optimize \inlinecode{top-make.mk}, or to ``prepare'' for it.
It is mainly useful when the research targets are more focused than the raw input and may not be necessary in many scenarios.
Its role is described here with an example.

Let's assume the raw input data (that the project received from a database) has 5000 rows (potential targets for doing the analysis on).
However, this particular project only needs to work on 100 of them, not the full 5000.
If the full 5000 targets are given to \inlinecode{top-make.mk}, Make will need to create a data lineage for all 5000 targets and project authors have to add checks in many places to ignore those that aren't necessary.
This will add to the project's complexity and is prone to many bugs.
Furthermore, if the filesystem isn't fast (for example a filesystem that exists over a network), checking all the intermediate and final files over the full lineage can be slow.

In this scenario, the preparation phase finds the IDs of the 100 targets of interest and saves them as a Make variable in a file under \inlinecode{BDIR}.
Later, this file is loaded into the analysis phase, precisely identifying the project's targets-of-interest.
This selection phase can't be done within \inlinecode{top-make.mk} because the full data lineage (all input and output files) must be known to Make before it starts to execute the necessary operations.
It is possible for Make to call itself on another Makefile, but this practice is strongly discouraged here because it makes the flow very hard to read.
However, if the project authors insist on calling Make within Make, it is certainly possible.
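
A minimal sketch of this mechanism is shown below; all file, variable and column names are illustrative, not the template's actual ones.
A rule in the preparation phase writes the selected identifiers into a small Make-loadable file, which the analysis phase later reads with Make's \inlinecode{include} directive:

\begin{lstlisting}[language=make]
  # Preparation: keep the first-column ID of rows whose third column is
  # above a threshold, and save them as a Make variable definition.
  # Recipe lines start with a TAB; '$$' passes a literal '$' to the shell.
  $(prepdir)/selection.mk: $(indir)/full-catalog.txt
          ids=$$(awk '$$3 > 10 {printf "%s ", $$1}' $<); \
          echo "selected-ids = $$ids" > $@

  # The analysis phase later loads it with a line like:
  #     include $(prepdir)/selection.mk
\end{lstlisting}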

The ``preparation'' phase thus allows \inlinecode{top-make.mk} to optimally organize the complex set of operations that must be run on each input and the dependencies (possibly in parallel).
It also greatly simplifies the coding for the project authors.
Ideally, \inlinecode{top-prepare.mk} is only for the ``preparation phase''.
However, projects can be complex and ultimately, the choice of which parts of an analysis count as ``preparation'' can be highly subjective.
Generally, the internal design and concepts of \inlinecode{top-prepare.mk} are identical to \inlinecode{top-make.mk}.
Therefore in Section \ref{sec:lowlevelanalysis}, where the lower-level management is discussed, we will only focus on the latter to avoid confusion.










\subsection{Low-level organization of analysis}
\label{sec:lowlevelanalysis}

A project consists of many steps, including data access (possibly by downloading), running various steps of the analysis on the obtained data, and creating the necessary plots, figures or tables for a published report, or output datasets for a database.
If all of these steps are organized in a single Makefile, it will become very large, or long, and will be hard to maintain, extend/grow, read, reuse, and cite.
Generally, large files are a bad practice because they go against the modularity principle (Section \ref{principle:modularity}).

The proposed template is thus designed to encourage and facilitate modularity by distributing the analysis in many Makefiles that contain contextually-similar (or modular) analysis steps.
In the rest of this paper these modular, or lower-level, Makefiles will be called \emph{subMakefiles}.
The subMakefiles are loaded into \inlinecode{top-make.mk} in a certain order and executed in one instance of Make without recursion (see Section \ref{sec:nonrecursivemake} below).
In other words, this modularity is just cosmetic for Make: Make ``sees'' all the subMakefiles as parts of one file.
However, this modularity plays a critical role for the human reader/author of the project and is necessary in re-using or citing parts of the analysis in other projects.

Within the project's source, the subMakefiles are placed in \inlinecode{reproduce\-/analysis\-/make} (with \inlinecode{top-make\-.mk}), see Figure \ref{fig:files}.
Therefore by design, \inlinecode{top-make.mk} is very simple: it just defines the ultimate target (\inlinecode{paper\-.pdf}), and the name and order of the subMakefiles that should be loaded into Make.
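
A conceptual sketch of \inlinecode{top-make.mk} is shown below (simplified: the real file also defines some internal variables, and the subMakefile names follow the hypothetical data lineage of Figure \ref{fig:datalineage}):

\begin{lstlisting}[language=make]
  # Ultimate target: the final report.
  all: paper.pdf

  # The subMakefiles, loaded in this specific order (no recursion).
  include reproduce/analysis/make/initialize.mk
  include reproduce/analysis/make/download.mk
  include reproduce/analysis/make/format.mk
  include reproduce/analysis/make/demo-plot.mk
  include reproduce/analysis/make/analysis3.mk
  include reproduce/analysis/make/verify.mk
  include reproduce/analysis/make/paper.mk
\end{lstlisting}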

The precise organization of the analysis steps highly depends on each individual project.
However, many aspects of the project management are the same, irrespective of the particular project; here we will focus on those.
Figure \ref{fig:datalineage} is a general overview of the analysis phase in a hypothetical project using this template.
As described above and shown in Figure \ref{fig:datalineage}, \inlinecode{top-make.mk} imports the various Makefiles under the \inlinecode{reproduce/} directory that are in charge of the different phases of the analysis.
Each of the subMakefiles builds intermediate targets, or outputs (files), which are shown there as blue boxes.
In the subsections below, the project's analysis is described using this graph.
We'll follow Make's paradigm (see Section \ref{sec:usingmake}) of starting from the ultimate target in Section \ref{sec:paperpdf}, and tracing back its lineage all the way up to the inputs and configuration files.

\begin{figure}[t]
  \begin{center}
    \includetikz{figure-data-lineage}
  \end{center}
  \vspace{-7mm}
  \caption{\label{fig:datalineage}Schematic representation of data lineage in a hypothetical project/pipeline using Maneage.
    Each colored box is a file in the project and the arrows show the dependencies between them.
    Green files/boxes are plain text files that are under version control and in the source-directory.
Blue files/boxes are output files of various steps in the build-directory, shown within the Makefile (\inlinecode{*.mk}) that generates them.
    For example \inlinecode{paper.pdf} depends on \inlinecode{project.tex} (in the build directory and generated automatically) and \inlinecode{paper.tex} (in the source directory and written by hand).
    In turn, \inlinecode{project.tex} depends on all the \inlinecode{*.tex} files at the bottom of the Makefiles above it.
    The solid arrows and built boxes with full opacity are actually described in the context of a demonstration project in this paper.
The dashed arrows and lower-opacity built boxes just show how adding more elements to the lineage is also easily possible, making this a scalable tool.
  }
\end{figure}

To avoid getting too abstract in the subsections below, where necessary, we'll do a basic analysis on the data of \citet[data were published as supplementary material on bioRxiv]{menke20} and try to replicate some of their results.
Note that because we are not using the same software, this isn't a reproduction (see Section \ref{definition:reproduction}).
We can't use the same software because they use Microsoft Excel for the analysis, which violates several of our principles: 1) Completeness (as a graphical user interface program, it needs human interaction, Section \ref{principle:complete}), 2) Minimal complexity (even free software alternatives like LibreOffice involve many dependencies and are extremely hard to build, Section \ref{principle:complexity}) and 3) Free software (Section \ref{principle:freesoftware}).




\subsubsection{Non-recursive Make}
\label{sec:nonrecursivemake}

It is possible to call a new instance of Make within an existing Make instance.
This is also known as recursive Make\footnote{\url{https://www.gnu.org/software/make/manual/html_node/Recursion.html}}.
Recursive Make is in fact used by many Make users, especially in the software development communities.
It is also possible within a project using the proposed template.

However, recursive Make is discouraged in the template, and not used in it.
All the subMakefiles mentioned above are \emph{included}\footnote{\url{https://www.gnu.org/software/make/manual/html_node/Include.html}} into \inlinecode{top-make.mk}, i.e., their contents are read into Make as it is parsing \inlinecode{top-make.mk}.
In Make's view, this is identical to having one long file with all the subMakefiles concatenated to each other.
Make is only called once and there is no recursion.

Furthermore, we have the convention that only \inlinecode{top-make.mk} (or \inlinecode{top-prepare.mk}) can include subMakefiles.
SubMakefiles should not include other subMakefiles.
The main reason behind this convention is the minimal complexity principle (Section \ref{principle:complexity}): a simple glance at \inlinecode{top-make.mk} will immediately show \emph{all} the project's subMakefiles \emph{and} their loading order.
When the names of the subMakefiles are descriptive enough, this enables both the project authors, and later, project readers to get a complete view of the various stages of the project.



\subsubsection{Ultimate target: the project's paper or report (\inlinecode{paper.pdf})}
\label{sec:paperpdf}

The ultimate purpose of a project is to report the data analysis result.
In scientific projects, this ``report'' is the published (or draft) paper.
In industry, it is a quality check and analysis of the final data product(s).
In both cases, the report contains many visualizations of the final data product of the project, for example as a plot, figure, table, or numbers blended into the narrative description.

In Figure \ref{fig:datalineage} it is shown as \inlinecode{paper.pdf}; note that it is the only built file (blue box) with no arrows leaving it.
In other words, nothing depends on it: highlighting its unique ``ultimate target'' position in the lineage.
The instructions to build \inlinecode{paper.pdf} are in \inlinecode{paper.mk}.
The report's source (containing the main narrative, its typesetting as well as that of the figures or tables) is \inlinecode{paper.tex}.
To build the final report's PDF, \inlinecode{references.tex} and \inlinecode{project.tex} are also loaded into \LaTeX{}.
\inlinecode{references.tex} is part of the project's source and can contain the Bib\TeX{} entries for the bibliography of the final report.
In other words, it formalizes the connections of this project with previous projects.

Another class of files that may be loaded into \LaTeX{}, but are not shown to avoid complications in the figure, are the figure or plot data, or built figures.
For example in this paper, the demonstration figure shown in Section \ref{sec:analysis} is drawn directly within \LaTeX{} (using its PGFPlots package).
The project only needed to build the plain-text table of numbers that were fed into PGFPlots (\inlinecode{tools-per-year.txt} in Figure \ref{fig:datalineage}).

However, building some plots may not be possible with PGFPlots, or the authors may prefer another tool to generate the visualization's image file \citep[for example with Python's Matplotlib, ][]{matplotlib2007}.
For this scenario, the actual image file of the visualization can be used in the lineage, for example \inlinecode{tools-per-year.pdf} instead of \inlinecode{tools-per-year.txt}.
See Section \ref{sec:publishing} on the project publication for special considerations regarding these files.





\subsubsection{Values within text (\inlinecode{project.tex})}
\label{sec:valuesintext}
Figures, plots, tables and narrative aren't the only analysis output that goes into the paper.
In many cases, quantitative values from the analysis are also blended into the sentences of the report's narration.
For example, consider this sentence in the abstract of \citet{akhlaghi19}: ``... detect the outer wings of M51 down to S/N of 0.25 ...''.
The reported signal-to-noise ratio (S/N) value ``0.25'' depends on the analysis and is an output of the analysis, just like the paper's figures and plots.
Manually typing the number in the \LaTeX{} source is prone to serious bugs: the author may forget to check it after a change in the analysis (e.g., using a newer version of the software, or changing an analysis parameter for another part of the paper).
Given the evolution of a scientific project, this type of human error is very hard to avoid when such values are written manually.
Such values must also be automatically generated.

To automatically generate and blend them in the text, we use \LaTeX{} macros.
In the quote above, the \LaTeX{} source\footnote{\citet{akhlaghi19} uses this template to be reproducible, so its \LaTeX{} source is available in multiple ways: 1) direct download from arXiv:\href{https://arxiv.org/abs/1909.11230}{1909.11230}, by clicking on ``other formats'', or 2) the Git and \href{https://doi.org/10.5281/zenodo.3408481}{zenodo.3408481} links that are also available on arXiv.} looks like this: ``\inlinecode{\small detect the outer wings of M51 down to S/N of \$\textbackslash{}demo\-sf\-optimized\-sn\$}''.
The \LaTeX{} macro ``\inlinecode{\small\textbackslash{}demosfoptimizedsn}'' is automatically calculated and recorded during the project and expands to the value ``\inlinecode{0.25}''.
The automatically generated file \inlinecode{project.tex} stores all such inline output macros.
Furthermore, Figure \ref{fig:datalineage} shows that it is a prerequisite of \inlinecode{paper.pdf} (as well as the manually written \LaTeX{} sources that are shown in green).
Therefore \inlinecode{paper.pdf} will not be built until this file is ready and up-to-date.

However, managing all the necessary \LaTeX{} macros for a full project in one file is against the modularity principle and can be frustrating and buggy.
To address this problem, all subMakefiles \emph{must} contain a fixed target with the same base-name, but with a \inlinecode{.tex} suffix.
For example in Figure \ref{fig:datalineage}, assume \inlinecode{out-1b.dat} is a table and the mean of its third column must be reported in the paper.
Therefore in \inlinecode{format.mk}, a prerequisite of \inlinecode{format.tex} is \inlinecode{out-1b.dat} (as shown by the arrow in Figure \ref{fig:datalineage}).
The recipe of this rule will calculate the mean of the column and put it in the \LaTeX{} macro which is written in \inlinecode{format.tex}.
In a similar way, any other reported calculation from \inlinecode{format.mk} is stored as a \LaTeX{} macro in \inlinecode{format.tex}.
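
A minimal sketch of such a rule is given below; the macro name \inlinecode{\textbackslash{}outonebmean} and the directory variables are illustrative (\inlinecode{\$(mtexdir)} is the directory hosting the \LaTeX{} macro files):

\begin{lstlisting}[language=make]
  # Sketch only: write the mean of the third column of 'out-1b.dat' as a
  # LaTeX macro into 'format.tex'.  Recipe lines start with a TAB; '$$'
  # passes a literal '$' to the shell.
  $(mtexdir)/format.tex: $(adir)/out-1b.dat
          v=$$(awk '{s+=$$3} END {printf "%.3f", s/NR}' $<); \
          echo "\newcommand{\outonebmean}{$$v}" > $@
\end{lstlisting}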

These \LaTeX{} macro files thus form the core skeleton of the project: as shown in Figure \ref{fig:datalineage}, the outward arrows of all built files of any subMakefile ultimately lead to one of these \LaTeX{} macro files.
Note that \emph{built} files in a subMakefile don't have to be a prerequisite of its \inlinecode{.tex} file.
They may point to another Makefile's \LaTeX{} macro file.
For example, even though \inlinecode{input1.dat} is a target in \inlinecode{download.mk}, it isn't a prerequisite of \inlinecode{download.tex}; it is a prerequisite of \inlinecode{out-2a.dat} (a target in \inlinecode{demo-plot.mk}).
That lineage ultimately ends in a \LaTeX{} macro file: \inlinecode{analysis3.tex}.





\subsubsection{Verification of outputs (\inlinecode{verify.mk})}
\label{sec:outputverification}
An important principle for this template is that outputs should be automatically verified, see Section \ref{principle:verify}.
However, simply confirming the checksum of the final PDF, or of figures and datasets, is not generally possible: as mentioned in Section \ref{principle:verify}, many tools that produce datasets or PDF files write the creation date into the produced files.
Therefore it is necessary to verify the project's outputs before the PDF is created.
To facilitate output verification, the project has a \inlinecode{verify.mk} Makefile, see Figure \ref{fig:datalineage}.
It is the only prerequisite of \inlinecode{project.tex} that was described in Section \ref{sec:paperpdf}.
Verification is therefore the connection-point, or bottleneck, between the analysis steps of the project and its final report.

Prior to publication, the project authors should add the MD5 checksums of all the \LaTeX{} macro files in the recipe of \inlinecode{verify\-.tex}.
The necessary structure is already there, so adding/changing the values is trivial.
If any \LaTeX{} macro is different in future builds of the project, the project will abort with a warning of the problematic file.
When projects involve other outputs (for example images, tables or datasets that will also be published), their contents should also be validated.
To do this, prerequisites should be added to the \inlinecode{verify\-.tex} rule that automatically check the \emph{contents} of other project outputs.
Recall that many tools print the creation date automatically when creating a file, so to verify a file, this kind of metadata must be ignored.
\inlinecode{verify\-.tex} contains some Make functions to facilitate checking with some file formats; others can be added easily.
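
For example, one such check inside the verification recipe could conceptually take the following form (the checksum shown is only a placeholder):

\begin{lstlisting}[language=bash]
  # Compare one LaTeX macro file with its pre-recorded MD5 checksum
  # (placeholder value shown); abort with a clear message on mismatch.
  md5sum -c <<< "d41d8cd98f00b204e9800998ecf8427e  format.tex" \
    || { echo "format.tex: contents have changed!"; exit 1; }
\end{lstlisting}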





\subsubsection{Project initialization (\inlinecode{initialize.mk})}
\label{sec:initialize}
The \inlinecode{initial\-ize\-.mk} subMakefile is present in all projects and is the first subMakefile that is loaded into \inlinecode{top-make.mk} (see Figure \ref{fig:datalineage}).
Project authors rarely need to modify/edit this file; it is part of the template's low-level infrastructure.
Nevertheless, project authors are strongly encouraged to study it and use all the useful variables and targets that it defines.
\inlinecode{initial\-ize\-.mk} doesn't contain any analysis or major processing steps, it just initializes the system.
For example it sets the necessary environment variables, internal Make variables and defines generic rules like \inlinecode{./project make clean} (to clean/delete all built products, not software) or \inlinecode{./project make dist} (to package the project into a tarball for distribution) among others.

It also adds one special \LaTeX{} macro in \inlinecode{initial\-ize\-.tex}: the current Git commit, which is recorded every time the analysis is run.
It is stored in the \inlinecode{{\footnotesize\textbackslash}projectversion} macro and can be used anywhere within the final report.
For this PDF it has a value of \inlinecode{\projectversion}.
One good place to put it is at the end of the abstract, for any reader to be able to identify the exact point in history that the report was created.
It also uses the \inlinecode{--dirty} feature of Git's \inlinecode{describe} output: if any version-controlled file is not already committed, the value of this macro will have a \inlinecode{-dirty} suffix.
If it is in a prominent place (like the abstract), it will always remind the author to commit their work.
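
For example, such a value can be obtained on the command line with Git's \inlinecode{describe} command (the output shown is only illustrative):

\begin{lstlisting}[language=bash]
  git describe --dirty --always
  # possible output: v0.5-35-g8b01d00-dirty   (illustrative)
\end{lstlisting}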





\subsubsection{Importing and validating inputs (\inlinecode{download.mk})}
\label{sec:download}
The \inlinecode{download.mk} subMakefile is present in all Maneage projects and contains the common steps for importing the input dataset(s) into the project.
All necessary input datasets to the project are imported through this subMakefile.
This helps in modularity and minimal complexity (Sections \ref{principle:modularity} \& \ref{principle:complexity}): to see what external datasets were used in a project, this is the only necessary file to manage/read.
Also, a simple call to a downloader (for example \inlinecode{wget}) is not usually enough.
Irrespective of where the dataset is \emph{used} in the project's lineage, some operations are always necessary when importing datasets:
\begin{itemize}
\item The file may already be present on the host, or the user may not have internet connection.
  Hence it is necessary to check the given \emph{input directory} on the host before attempting to download over the internet (see Section \ref{sec:localdirs}).
\item The network connection might temporarily fail, but succeed with an automatic retry (even today this isn't uncommon).
  Crashing the whole analysis because of a temporary network issue requires human intervention and is against the completeness principle (Section \ref{principle:complete}).
\item Before it can be used, the integrity of the imported file must be confirmed with its stored checksum.
\end{itemize}

In some scenarios the generic download script may not be useful, for example when the database takes queries and generates the dataset for download on the fly.
In such cases, users can add their own Make rules in this \inlinecode{download.mk} to import the file.
They can use its pre-defined structure to do the extra steps like validating it.
Note that in such cases the servers often encode the creation date and version of their database system in the resulting file as metadata.
Even when the actual data is identical, this metadata (which is in the same file) will differ based on the moment the query was done.
Therefore a simple checksum of the whole downloaded file can't be used for validation in such scenarios, see Section \ref{principle:verify}.

\begin{figure}[t]
  \input{tex/src/figure-inputconf.tex}
  \vspace{-3mm}
  \caption{\label{fig:inputconf} Contents of the \inlinecode{INPUTS.conf} file for the demonstration dataset of \citet{menke20}.
    This file contains the basic, or minimal, metadata for retrieving the required dataset(s) of a project: it can become arbitrarily long.
Here, \inlinecode{MK20DATA} contains the name of this dataset within this project.
    \inlinecode{MK20MD5} contains the MD5 checksum of the dataset, in order to check the validity and integrity of the dataset before usage.
    \inlinecode{MK20SIZE} contains the size of the dataset in human readable format.
\inlinecode{MK20URL} is the URL from which the dataset is automatically downloaded (only when it is not already present on the host).
    Note that the original URL (footnote \ref{footnote:dataurl}) was too long to display properly here.
  }
\end{figure}

Each external dataset has some basic information, including its expected name on the local system (for offline access), the necessary checksum to validate it (either the whole file or just its main ``data''), and its URL/PID.
In this template, such information regarding a project's input dataset(s) is in the \inlinecode{INPUTS.conf} file.
See Figures \ref{fig:files} \& \ref{fig:datalineage} for the position of \inlinecode{INPUTS.conf} in the project's file structure and data lineage respectively.
For demonstration, in this paper we are using the datasets of \citet{menke20}, which are stored in one \inlinecode{.xlsx} file on bioRxiv\footnote{\label{footnote:dataurl}Full data URL: \url{\menketwentyurl}}.
Figure \ref{fig:inputconf} shows the corresponding \inlinecode{INPUTS.conf}, with the necessary information stored as Make variables for this dataset.

\begin{figure}[t]
  \input{tex/src/figure-src-download.tex}
  \vspace{-3mm}
  \caption{\label{fig:download} Simplified Make rule, showing how the downloaded data URL is written into this paper (in Footnote \ref{footnote:dataurl}).
    In Make, lines starting with a \inlinecode{\#} are ignored (thus used for human-readable comments, like the red line shown here).
    The \emph{target} is placed before a colon (\inlinecode{:}) and its \emph{prerequisite(s)} is(are) after the colon.
    Here, both the target and prerequisite can be seen in the second line.
    The executable \emph{recipe} lines (shell commands to build the target from the prerequisite), start with a \inlinecode{TAB} (shown here with a light gray \inlinecode{\_\_\_TAB\_\_\_}).
    A Make recipe is an independent, or containerized, shell script.
    In the recipe, \inlinecode{\$@} is an \emph{automatic variable}, expanding to the target file's name.
    For \inlinecode{MK20URL}, see Figure \ref{fig:inputconf}.
    The same URL is then passed to this paper through the definition of the \LaTeX{} variable \inlinecode{\textbackslash{}menketwentyurl} that is written in \inlinecode{\$(mtexdir)/download.tex}.
    Later, when the paper's PDF is being built, this \inlinecode{.tex} file is loaded into it.
    \inlinecode{\$(mtexdir)} is the directory hosting all the \LaTeX{} macro files for various stages of the analysis, see Section \ref{sec:valuesintext}.
  }
\end{figure}

If \inlinecode{menke20.xlsx} exists in the \emph{input} directory, it will just be validated and put in the \emph{build} directory.
Otherwise, it will be downloaded from the given URL, validated, and put in the build directory.
Recall that the input and build directories differ from system to system and are specified at project configuration time, see Section \ref{sec:localdirs}.
In the data lineage of Figure \ref{fig:datalineage}, the arrow from \inlinecode{INPUTS.conf} to \inlinecode{menke20.xlsx} symbolizes this step.
Note that in our notation, once an external dataset is imported, it is a \emph{built} product; it thus has a blue box in Figure \ref{fig:datalineage}.

It is sometimes necessary to report basic information about external datasets in the report/paper.
As described in Section \ref{sec:valuesintext}, here this is done with \LaTeX{} macros to avoid human error.
For example in Footnote \ref{footnote:dataurl}, we gave the full URL that this dataset was downloaded from.
In the \LaTeX{} source of that footnote, this URL is stored as the \inlinecode{\textbackslash{}menketwentyurl} macro, which is created with the simplified\footnote{This Make rule is simplified by removing the directory variable names to help in readability.} Make rule shown in Figure \ref{fig:download} (it is located at the bottom of \inlinecode{download.mk}).

In this rule, \inlinecode{download.tex} is the \emph{target} and \inlinecode{menke20.xlsx} is its \emph{prerequisite}.
The \emph{recipe} to build the target from the prerequisite is the \inlinecode{echo} shell command which writes the \LaTeX{} macro definition as a simple string (enclosed in double-quotes) into the \inlinecode{download.tex}.
The target is built after the prerequisite(s) are built, or when the prerequisites are newer than the target (for more on Make, see Appendix \ref{appendix:make}).
Note that \inlinecode{\$(MK20URL)} is a call to the variable defined above in \inlinecode{INPUTS.conf}.
Also recall that in Make, \inlinecode{\$@} is an \emph{automatic variable}, which is expanded to the rule's target name (in this case, \inlinecode{download.tex}).
Therefore if the dataset is re-imported (possibly with a new URL), the URL in Footnote \ref{footnote:dataurl} will also be re-created automatically.
In the data lineage of Figure \ref{fig:datalineage}, the arrow from \inlinecode{menke20.xlsx} to \inlinecode{download.tex} symbolizes this step.





\subsubsection{The analysis}
\label{sec:analysis}

The analysis subMakefile(s) are loaded into \inlinecode{top-make.mk} after the initialization and download steps (see Sections \ref{sec:download} and \ref{sec:initialize}).
However, the analysis phase involves much more complexity.
If done without modularity in mind from the start, research project sources can become very long, thus becoming hard to modify, debug, improve or read.
Maneage is therefore designed to encourage and facilitate splitting the analysis into multiple/modular subMakefiles.
For example in the data lineage graph of Figure \ref{fig:datalineage}, the analysis is broken into three subMakefiles: \inlinecode{format.mk}, \inlinecode{demo-plot.mk} and \inlinecode{analysis3.mk}.

A purely theoretical discussion of this phase can be hard to follow; we will thus describe a demonstration project on data from \citet{menke20}.
In Section \ref{sec:download}, the process of importing this dataset into the project was described.
The first issue is that \inlinecode{menke20.xlsx} must be converted to a simple plain-text table which is generically usable by simple tools (see the principle of minimal complexity in Section \ref{principle:complexity}).
For more on the problems with Microsoft Office and this format, see Section \ref{sec:lowlevelanalysis}.
In \inlinecode{format.mk} (Figure \ref{fig:formatsrc}), we thus convert it to a simple white-space separated, plain-text table (\inlinecode{menke20-table-3.txt}) and do a basic calculation on it.

\begin{figure}[t]
  \input{tex/src/figure-src-format.tex}
  \vspace{-3mm}
  \caption{\label{fig:formatsrc}Simplified contents of \inlinecode{format.mk}.
    Here, we want to convert the downloaded XLSX dataset (Office Open XML Workbook format) to a simple plain-text fixed-width-per-column table.
    For the position of this subMakefile in the full project's data lineage, see Figure \ref{fig:datalineage}.
    In particular, here the arrows of that figure from \inlinecode{menke20.xlsx} to \inlinecode{menke20-table-3.txt} and from the latter to \inlinecode{format.tex} are shown as the second and third Make rules.
    See Figure \ref{fig:download} and Appendix \ref{appendix:make} for more on the Make notation, and Section \ref{sec:analysis} for a description of the steps.
  }
\end{figure}

As shown in Figure \ref{fig:formatsrc}, the first operation (or Make \emph{rule}) is to define a directory to keep the generated files.
To keep the source and build-directories separate, we thus define \inlinecode{a1dir} under the build-directory (\inlinecode{BDIR}, see Section \ref{sec:localdirs}).
We'll then define all outputs/targets to be under this directory.
The second rule (which depends on the directory as a prerequisite), then converts the Microsoft Excel spreadsheet file to a simple plain-text format using the XLSX I/O program.
But XLSX I/O only converts to CSV and we don't need all the columns here, so we further shorten and modify the table (re-order columns and multiply them) using the AWK program (which is available on any Unix-like operating system).
In Figure \ref{fig:datalineage} on the example data lineage, this second rule is shown with the arrow from \inlinecode{menke20.xlsx} to \inlinecode{menke20-table-3.txt}.

Finally, as described in Section \ref{sec:valuesintext}, the last rule of a subMakefile should be a \LaTeX{} macro file (in Figure \ref{fig:formatsrc}, this is the third rule).
Ending each analysis phase with a \LaTeX{} macro is natural in many papers/reports.
For example, here, once the dataset is ready, we want to give the reader a general view of the dataset size.
We thus need to report the number of papers and journals studied by \citet{menke20}.
Therefore in the \LaTeX{} macro rule, we count both from the simplified table of the second rule.
In each case, the sum is first stored in a temporary shell variable \inlinecode{v}, which is then written into the two \LaTeX{} macros \inlinecode{\textbackslash{}menkenumpapers} and \inlinecode{\textbackslash{}menkenumjournals} respectively.
In the built PDF paper, they expand to $\menkenumpapers$ (number of papers studied) and $\menkenumjournals$ (number of journals studied) respectively.
This rule is shown schematically in Figure \ref{fig:datalineage} with the arrow from \inlinecode{menke20-table-3.txt} to \inlinecode{format.tex}.
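For orientation, a rough sketch of the second and third rules described above is shown below.
It is only an illustration: the actual rules in \inlinecode{format.mk} (Figure \ref{fig:formatsrc}) may differ, and the intermediate CSV file name and the column numbers used here are assumptions.

\begin{verbatim}
# Second rule: convert the XLSX file to CSV with XLSX I/O, then
# select/re-order the needed columns into a white-space separated
# plain-text table with AWK.
menke20-table-3.txt: menke20.xlsx
        xlsxio_xlsx2csv menke20.xlsx
        awk -F, 'NR>1 {print $$1, $$2, $$3}' menke20.csv > $@

# Third rule: sum a column of the simplified table into a temporary
# shell variable and write it as a LaTeX macro definition.
format.tex: menke20-table-3.txt
        v=$$(awk '{s+=$$2} END {print s}' menke20-table-3.txt); \
        echo "\newcommand{\menkenumpapers}{$$v}" > $@
\end{verbatim}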

To further demonstrate the concept, we'll reproduce (with some enhancements) Figure 1C of \citet{menke20} in Figure \ref{fig:toolsperyear}.
Figure \ref{fig:toolsperyear} also shows the number of papers that were studied each year in the same plot (unlike the original plot).
Its horizontal axis also shows the full range of the data (starting from $\menkefirstyear$) while the original Figure 1C in \citet{menke20} starts from 1997.
The reason \citet{menke20} decided to avoid earlier years was probably the small number of papers before 1997.
For example in \menkenumpapersdemoyear, they had only studied \menkenumpapersdemocount{} papers.
Note that both numbers in this sentence, as well as the first year of data mentioned above, are actually \LaTeX{} macros (see Figure \ref{fig:demoplotsrc}).

\begin{figure}[t]
  \begin{center}
    \includetikz{figure-tools-per-year}
  \end{center}
  \vspace{-5mm}
  \caption{\label{fig:toolsperyear}Fraction of papers mentioning software tools (green line, left vertical axis) and the total number of papers studied in that year (light red bars, right vertical axis in log-scale).
    Data from \citet{menke20}.
    The subMakefile archiving the executable lineage of the figure's data is shown in Figure \ref{fig:demoplotsrc} and discussed in Section \ref{sec:analysis}.
  }
\end{figure}

The operation of reproducing that figure is a contextually separate operation from the operations that were described above in \inlinecode{format.mk}.
Therefore we add a new subMakefile to the project called \inlinecode{demo-plot.mk}, which is shown in Figure \ref{fig:demoplotsrc}.
As before, in the first rule, we make the directory to host the data (\inlinecode{a2dir}).
However, unlike before, this directory is placed under \inlinecode{texdir} which is the directory hosting all \LaTeX{} related files.
This is because the plot of Figure \ref{fig:toolsperyear} is directly made within \LaTeX{}, using its PGFPlots package\footnote{PGFPlots package of \LaTeX: \url{https://ctan.org/pkg/pgfplots}.
  \inlinecode{texdir} has some special features when using \LaTeX{}, see Section \ref{sec:buildingpaper}.
  PGFPlots uses the same graphics engine that is building the paper, producing a high-quality figure that blends nicely into the paper.}.
Note that this is just our personal choice; other methods of generating plots (for example with R, Gnuplot or Matplotlib) are also possible within this system.
As with the input data files of PGFPlots, it is only necessary to place the files that are loaded into \LaTeX{} under the \inlinecode{\$(BDIR)/tex} directory, see Section \ref{sec:publishing}.

The plain-text table that is used to build Figure \ref{fig:toolsperyear} is defined as the variable \inlinecode{a2mk20f1c} of Figure \ref{fig:demoplotsrc} (just above the second rule).
As shown in the second rule, again we use GNU AWK to extract the necessary information from \inlinecode{mk20tab3} (which was built in \inlinecode{format.mk}).
\inlinecode{mk20tab3} is thus the \emph{prerequisite} of \inlinecode{a2mk20f1c} (along with \inlinecode{a2dir}).
In Figure \ref{fig:datalineage}, this lineage is shown as the arrow from \inlinecode{menke20-table-3.txt} (file name of \inlinecode{mk20tab3}) that points to \inlinecode{tools-per-year.txt} (file name of \inlinecode{a2mk20f1c}).
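A minimal sketch of this second rule could be the following; it is only an illustration (the variable names follow the text, but the AWK command and column numbers are assumptions):

\begin{verbatim}
# Extract the columns needed for the plot from the table that was
# built in format.mk; 'a2dir' is an order-only prerequisite so the
# output directory exists before the recipe runs.
a2mk20f1c = $(a2dir)/tools-per-year.txt
$(a2mk20f1c): $(mk20tab3) | $(a2dir)
        awk '{print $$1, $$2, $$3}' $(mk20tab3) > $@
\end{verbatim}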

As with all subMakefiles, \inlinecode{demo-plot.mk} finishes with the rule to build its \LaTeX{} macro file (\inlinecode{demo-plot.tex}) containing the values reported above.
But here, it doesn't just depend on \inlinecode{a2mk20f1c}; it also depends on the \inlinecode{menke-demo-year.conf} configuration file.
This is also visible in the data lineage (Figure \ref{fig:datalineage}): two arrows point to \inlinecode{demo-plot.tex}, one from a configuration file and one from a built file.
Configuration files are discussed in more detail in Section \ref{sec:configfiles}.

\begin{figure}[t]
  \input{tex/src/figure-src-demoplot.tex}
  \vspace{-2mm}
  \caption{\label{fig:demoplotsrc}Contents of the \inlinecode{demo-plot.mk} subMakefile used to generate the data for Figure \ref{fig:toolsperyear}.
  }
\end{figure}

In a similar manner many more subMakefiles can be added in more complex analysis scenarios. % should subMakefiles have a special kind of formatting? Such as monospaced, italics, etc?
This is shown with the lower opacity files and dashed arrows of the data lineage in Figure \ref{fig:datalineage}.
Generally, the files created within one subMakefile don't necessarily have to be a prerequisite of its \LaTeX{} macro.
For example, see \inlinecode{demo-out.dat} in Figure \ref{fig:datalineage}: it is managed in \inlinecode{demo-plot.mk}; however, it isn't a prerequisite of \inlinecode{demo-plot.tex}, but of \inlinecode{out-3b.dat} (which is managed in \inlinecode{another-step.mk} and is a prerequisite of \inlinecode{another-step.tex}).
Hence, ultimately, its descendants still conclude in a \LaTeX{} macro, just through another file.

The high-level \inlinecode{top-make.mk} file is designed to simplify the addition of new subMakefiles for the authors, and reading the source for readers (see Section \ref{sec:highlevelanalysis}).
As mentioned before, this high-level Makefile just defines the ultimate target (\inlinecode{paper.pdf}, see Section \ref{sec:paperpdf}) and imports all the subMakefiles in a specific order.
For example, Figure \ref{fig:topmake} shows this project's \inlinecode{top-make.mk}.
When descriptive names are chosen for the subMakefiles, a simple glance over the values given to \inlinecode{makesrc} there provides a general understanding of the project without needing to get into the technical details.
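Schematically, the structure of \inlinecode{top-make.mk} is just a list of subMakefile names and a line that includes them in that order; the sketch below is only an illustration (the subMakefile names and path are illustrative), see Figure \ref{fig:topmake} for the actual file.

\begin{verbatim}
# List of subMakefiles, loaded in this order.
makesrc = initialize \
          download \
          format \
          demo-plot \
          paper

# Include all the above subMakefiles from their directory.
include $(foreach s, $(makesrc), reproduce/analysis/make/$(s).mk)
\end{verbatim}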

\begin{figure}[t]
  \input{tex/src/figure-src-topmake.tex}
  \vspace{-3mm}
  \caption{\label{fig:topmake} General view of the high-level \inlinecode{top-make.mk} Makefile, which manages the project's analysis through its various subMakefiles.
    See Figures \ref{fig:files} \& \ref{fig:datalineage} for its location in the project's file structure and its data lineage, as well as the subMakefiles it includes.
  }
\end{figure}



\subsubsection{Configuration files}
\label{sec:configfiles}

The analysis subMakefiles discussed above in Section \ref{sec:analysis} should only contain the organization of an analysis; they should not contain any fixed numbers, settings or parameters.
Such elements should only be used as variables that are defined elsewhere.
In the data lineage plot of Figure \ref{fig:datalineage}, configuration files are shown as the sharp-edged, green \inlinecode{*.conf} files in the top row.

The last recipe of Figure \ref{fig:demoplotsrc} is a good demonstration of their usage: in Section \ref{sec:analysis}, we reported the number of papers studied by \citet{menke20} in \menkenumpapersdemoyear.
However, note that in Figure \ref{fig:demoplotsrc}, the year's number is not written by hand in the subMakefile.
It is referenced through the \inlinecode{menke-year-demo} variable, which is defined in \inlinecode{menke-demo-year.conf}, a prerequisite of the \inlinecode{demo-plot.tex} rule.
This is also visible in the data lineage of Figure \ref{fig:datalineage}.
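As a minimal sketch of this mechanism (only an illustration: the year value is a placeholder and the actual file contents may differ), the configuration file simply assigns a Make variable, and the rule that uses it lists the file as a prerequisite:

\begin{verbatim}
# In menke-demo-year.conf: the demonstration year reported in the text.
menke-year-demo = 1991

# In demo-plot.mk: the macro file depends on the configuration file,
# so changing the year re-executes only this branch of the lineage.
demo-plot.tex: $(a2mk20f1c) menke-demo-year.conf
        echo "\newcommand{\menkenumpapersdemoyear}{$(menke-year-demo)}" > $@
\end{verbatim}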

All the configuration files of a project are placed under the \inlinecode{reproduce/analysis/config} (see Figure \ref{fig:files}) subdirectory, and are loaded into \inlinecode{top-make.mk} before any of the subMakefiles, see Figure \ref{fig:topmake}.
The configuration files greatly simplify project management from multiple perspectives as listed below:

\begin{itemize}
\item If an analysis parameter is used in multiple places within the project, simply changing the value in the configuration file will change it everywhere in the project.
  This is critical in more complex projects; managing parameters any other way can lead to significant human error.
\item Configuration files enable the logical separation between the low-level implementation and high-level running of a project.
  For example, after writing the project, the authors don't need to remember where the number/parameter was used; they can simply modify the configuration file.
  Other co-authors, or readers, of the project also benefit: they just need to know that there is a unified place for high-level project settings, parameters, or numbers without necessarily having to know the low-level implementation.
\item A configuration file will be a prerequisite to any rule that uses it's value.
  If the configuration file is updated (the value/parameter is changed), Make will automatically detect the data lineage branch that is affected by it and re-execute only that branch, without any human interference.
\end{itemize}

This is a great leap compared to the current, mostly manual, project management that many scientists employ.
Manual management is prone to serious human error: in the later phases of a project, scientists are least likely to experiment with their project's configurations.
However, the later phases of a project are precisely the times when the lower-level parts of the project are complete and the authors can look at the bigger picture.
This style of managing project parameters therefore produces a much healthier scientific result, where experimentation is cheap during all phases of a project: before its publication (by the authors) and after it (by the authors and readers).




\subsection{Projects as Git branches of Maneage}
\label{sec:starting}

Maneage is fully composed of plain-text files distributed in a directory structure (see Sections \ref{principle:text} \& \ref{sec:generalimplementation} and Figure \ref{fig:files}).
Therefore it can be maintained under version control systems like Git (for more on version control, see Appendix \ref{appendix:versioncontrol}).
Every commit in the version-controlled history contains \emph{a complete} snapshot of the data lineage; for more, see the completeness principle in Section \ref{principle:complete}.

Maneage is maintained by its developers in a central branch, which we'll call \inlinecode{man\-eage} hereafter.
The \inlinecode{man\-eage} branch contains all the low-level infrastructure, or skeleton, that is necessary for any project.
This skeleton is primarily the configuration features discussed in Section \ref{sec:projectconfigure}\footnote{Recall that project configuration files are located under \inlinecode{reproduce/software} in Figure \ref{fig:files}, and executed with the \inlinecode{./project configure} command.}.
The \inlinecode{maneage} branch only contains a minimal demonstration analysis in order to be complete\footnote{The names of all the files related to the demonstration of the \inlinecode{maneage} branch have a \inlinecode{delete-me} prefix to highlight that they must be deleted when starting a new project.}.

To start a new project, users simply clone it from its reference repository and build their own Git branch over the most recent commit.
This is demonstrated in the first phase of Figure \ref{fig:branching}: the project started by branching off commit \inlinecode{0c120cb} of \inlinecode{maneage}.
They can then start customizing Maneage for their project and adding their high-level analysis in their own branch and push it to their own Git repository.
Maneage contains a file called \inlinecode{README-hacking.md} that has a complete checklist of steps to start a new project and remove the demonstration parts.
This file is updated on the \inlinecode{maneage} branch and will always be up-to-date with the low-level infrastructure.
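In practice, starting a project therefore looks roughly like the following shell commands (a sketch only: the repository URL, branch and remote names are illustrative, see \inlinecode{README-hacking.md} for the authoritative checklist):

\begin{verbatim}
# Clone Maneage and build a new project branch on its latest commit.
git clone https://git.example.org/maneage.git my-project
cd my-project
git checkout -b my-project        # new branch off the current commit
git remote add my-remote https://git.example.org/me/my-project.git
git push my-remote my-project
\end{verbatim}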

%% Exact URLs of imported images.
%% Collaboration icon: https://www.flaticon.com/free-icon/collaboration_809522
%% Paper done:         https://www.flaticon.com/free-icon/file_2521838
%% Paper processing:   https://www.flaticon.com/free-icon/file_2521989
\begin{figure}[t]
  \includetikz{figure-branching}
  \vspace{-3mm}
  \caption{\label{fig:branching} Projects start by branching off the main Maneage branch and developing their high-level analysis over the common low-level infrastructure: add flesh to a skeleton.
    The low-level infrastructure can always be updated (keeping the added high-level analysis intact), with a simple merge between branches.
    Two phases of a project's evolution are shown here: in phase 1, a co-author has made two commits in parallel to the main project branch, which have later been merged.
    In phase 2, the project has finished: note the identical first project commit and the Maneage commits it branches from.
    The dashed parts of phase 2 can be any arbitrary history after those shown in phase 1.
    A second team now wants to build upon that published work in a derived branch, or project.
    The second team applies two commits and merges their branch with Maneage to improve the skeleton and continue their research.
    The Git commits are shown on their branches as colored ellipses, with their hash printed in them.
    The commits are colored based on the team that is working on that branch.
    The collaboration and paper icons are respectively made by `mynamepong' and `iconixar' and downloaded from \url{www.flaticon.com}.
  }
\end{figure}

After a project starts, Maneage will evolve.
For example, new features will be added and low-level bugs that are relevant to any project will be fixed.
Because all the changes in Maneage are committed on the \inlinecode{maneage} branch (that projects also branch off from), updating the project's low-level infrastructure is as easy as merging the \inlinecode{maneage} branch into the project's branch.
For example, see how Maneage's \inlinecode{3c05235} commit has been merged into the project's branch through commit \inlinecode{2ed0c82} in Figure \ref{fig:branching} (phase 1).
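In practice, this update is a standard Git fetch and merge, roughly as below (remote and branch names are illustrative):

\begin{verbatim}
# Import the latest Maneage commits and merge them into the
# project's own branch (resolving any conflicts if they occur).
git checkout my-project
git fetch origin-maneage
git merge origin-maneage/maneage
\end{verbatim}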

This doesn't just apply to the pre-publication phase: when done in Maneage, a project can be revived at any later date by other researchers, as shown in phase 2 of Figure \ref{fig:branching}.
In that figure, a new team of researchers has decided to experiment on the results of the published paper and has merged it with the Maneage branch (commit \inlinecode{a92b25a}), for example to incorporate a fix for a portability problem on their operating system that was corrected in Maneage after the paper's publication.
Propagating bug fixes or improvements in the low-level infrastructure to all projects using Maneage has been one of the reasons it has evolved so well over the last 5 years, see Section \ref{sec:futurework}.
As we started using it in more projects with more users, many bugs were found and improvements made.
We would then implement the fix in Maneage and it would propagate to all other projects automatically at their next merge.

Other scenarios include a third project that can easily merge various high-level components from different projects into its own branch, thus adding a temporal dimension to its data lineage.
Modern version control systems provide many more capabilities that can be exploited through Maneage in project management, thanks to the shared branch it has with \emph{all} projects that use it.





\subsection{Multi-user collaboration on single build directory}
\label{sec:collaborating}

Because the project's source and build directories are separate, it is possible for different users to share a build directory, while working on their own separate project branches during a collaboration.
This is similar to the parallel branch that is later merged in phase 1 of Figure \ref{fig:branching}.

To give all users the necessary permissions, Maneage assumes that they are in the same (POSIX) user group on the system.
All files built in the build directory are then automatically assigned to this user group, with read-write permissions for all group members (\inlinecode{-rwxrwx---}), through the \inlinecode{sg} and \inlinecode{umask} commands that are prepended to the call to Make.
The \inlinecode{./project} script has a special \inlinecode{--group} option which activates this mode in both configuration and analysis phases.
It takes the user group name as its argument and the built files will only be accessible by the group members, even when the shared location is accessible by people outside the project.
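For example, a shared build can be configured and run roughly as below (the group name \inlinecode{myteam} is hypothetical and the exact option syntax may differ slightly):

\begin{verbatim}
# Configure and run the analysis with group-writable outputs.
./project configure --group=myteam
./project make --group=myteam
\end{verbatim}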

When multiple project members are contributing on a shared build directory, they usually work on independent parts of the project which won't cause any conflict.
When there is a conflict, a member can temporarily change the name of the part's top directory within their branch.
For example if Alice is working on the \inlinecode{demo-plot.mk} (Figure \ref{fig:demoplotsrc}) in parallel with others, she can set \inlinecode{a2dir} to be \inlinecode{\$(texdir)/tools-per-year-alice}.
Other project members can also compare her results in this way, and once her work is merged into the master branch, \inlinecode{a2dir} can be reset to its original value.
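In Make syntax, this temporary change is a single-line redefinition in her branch (a sketch based on the example above):

\begin{verbatim}
# Temporary, per-member output directory to avoid conflicts.
a2dir = $(texdir)/tools-per-year-alice
\end{verbatim}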

The project already applies this strategy for the part of the project that runs \LaTeX{} to build the final report.
This is because project members will usually also be editing their parts of the report/paper as they progress.
To fix this, when the project is configured and built with \inlinecode{--group}, each project member's user-name will be appended to the \LaTeX{} build directory (which is under \inlinecode{\$(BDIR)/tex}).

However, human error is inevitable, so when some phases of the project take a long time to run, the user and group write-permission flags can be manually removed from the respective subdirectories under \inlinecode{\$(BDIR)}, until the project needs to be built from scratch again (for example, for a test prior to submission).
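For example, write permissions on the outputs of a finished phase can be removed, and later restored, with standard shell commands like the following (the directory path is illustrative):

\begin{verbatim}
# Protect the outputs of a finished phase from accidental changes.
chmod -R ug-w /path/to/build-dir/analysis/a1dir

# Restore write permissions before re-building from scratch.
chmod -R ug+w /path/to/build-dir/analysis/a1dir
\end{verbatim}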




\subsection{Publishing the project}
\label{sec:publishing}

Once the project is complete, publishing it is the final step.
In a scientific scenario, it should ideally be made public.
As discussed in the various steps before, the source of the project (the software configuration, data lineage and narrative text) is fully in plain text, greatly facilitating the publication of the project.


\subsubsection{Automatic creation of publication tarball}
\label{sec:makedist}
To facilitate the publication of the project source, Maneage has a special \inlinecode{dist} target during the build process which is activated with the command \inlinecode{./project make dist}.
In this mode, Maneage will not do any analysis; it will simply copy the full project source (at the given commit) into a temporary directory and compress it into a \inlinecode{.tar.gz} file.
If Zip compression is necessary, the \inlinecode{dist-zip} target can be called instead of \inlinecode{dist}.

The \inlinecode{dist} tarball contains the project's full data lineage and is enough to reproduce the full project: it can build the software, download the data, run the analysis, and build the final PDF.
However, it doesn't contain the Git history, it is just a checkout of one commit.
Instead of the history, it contains all the necessary \emph{built products} that go into building the final paper without the analysis: for example the used plots, figures, tables, and \inlinecode{project.tex}, see Section \ref{sec:valuesintext}.
As a result, the tarball can \emph{also} be used to build only the final report, with a simple \inlinecode{pdflatex paper} command, \emph{without} running \inlinecode{./project}.
When the project is distributed as a tarball (not as a Git repository), building the report may be the main purpose (as in the arXiv distribution scenario discussed below), and the data lineage (under the \inlinecode{reproduce/} directory) is likely just a supplement.
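For example, the distribution tarball can be created and used roughly as below (the tarball file name is illustrative):

\begin{verbatim}
# Package the project source and the necessary built products.
./project make dist        # or 'dist-zip' for a .zip archive

# A reader can then build only the report, without './project'.
tar xf my-project.tar.gz
cd my-project
pdflatex paper
\end{verbatim}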

\subsubsection{What to publish, and where?}
\label{sec:whatpublish}
The project's source, which is fully in hand-written plain-text, has a very small volume, usually much less than one megabyte.
However, the necessary input files (see Section \ref{definition:input}) and built datasets may be arbitrarily large, from megabytes to petabytes or more.
Therefore, there are various scenarios for the publication of the project as described below:

\begin{itemize}
\item \textbf{Only source:} Publishing the project source is very easy because it only contains plain-text files with a very small volume: a commit will usually be on the scale of $\sim100kB$. With the Git history, it will usually only be on the scale of $\sim5MB$.

  \begin{itemize}
  \item \textbf{Public Git repository:} This is the simplest publication method.
    The project will already be on a (private) Git repository prior to publication.
    In such cases, the private configuration can be removed so it becomes public.
  \item \textbf{In journal or PDF-only preprint systems (e.g., bioRxiv):} If the journal or pre-print server allows publication of small supplement files with the paper, the commit that produced the final paper can be submitted as a compressed file, for example the tarball produced by \inlinecode{./project make dist} (see Section \ref{sec:makedist}).
  \item \textbf{arXiv:} Besides simply uploading a PDF pre-print, on arXiv, it is also possible to upload the \LaTeX{} source of the paper.
    arXiv will run its own internal \LaTeX{} engine on the uploaded files and produce the PDF that is published.
    When the project is published, arXiv also allows users to anonymously download the \LaTeX{} source tarball that the authors uploaded\footnote{In the current arXiv user interface, the tarball is available by clicking the ``Other formats'' link on the paper's main page, and then clicking ``Download source''; this can be checked, for example, with \url{https://arxiv.org/abs/1909.11230} of \citet{akhlaghi19}.}.
    Therefore, simply uploading the tarball from the \inlinecode{./project make dist} command is sufficient for arXiv, and will allow the full project data lineage to also be published there with the \LaTeX{} source.
    We did this in \citet[arXiv:1909.11230]{akhlaghi19} and \citet[arXiv:1911.01430]{infante20}.
    Since arXiv is mirrored in many institutes over the planet, this is a robust way to preserve the reproducible lineage.
  \item \textbf{In output datasets:} Many data storage formats support an internal structure with the data file.
    One commonly used example today is the Hierarchical Data Format (HDF), and in particular its HDF5 which can host a complex filesystem in POSIX syntax.
    It is even used by some reproducible analysis solutions like the Active Papers project \citep[for more, see Appendix \ref{appendix:activepapers}]{hinsen11}.
    Since the volume of the project source is so insignificant compared to the output datasets of most projects, the whole project source can be stored with each published data file if the format supports it.
  \end{itemize}
\item \textbf{Source and data:} The project inputs (including the software tarballs, or possible datasets) may have a large volume.
  Publishing them with the source is thus not always possible.
  However, based on the definition of inputs in Section \ref{definition:input}, they are usable in other projects: another project may use the same data or software source code, in a different way.
  Therefore even when published with the source, it is encouraged to publish them as separate files.

  For example, this strategy was followed in \href{https://doi.org/10.5281/zenodo.3408481}{zenodo.3408481}\footnote{\url{https://doi.org/10.5281/zenodo.3408481}}, which supplements \citet{akhlaghi19} and contains the following files.

  \begin{itemize}
  \item \textbf{Final PDF:} for easy understanding of the project.
  \item \textbf{Git history:} as the Git ``bundle'' of the project.
    This single file contains the full Git history of the project up to its publication date (only 4MB); see Section \ref{sec:starting} and the sketch after this list.
  \item \textbf{Project source tarball}: output of \inlinecode{./project make dist}, as explained above.
  \item \textbf{Tarballs of all necessary software:} This is necessary in case the software webpages are not accessible for any reason at a later date, or the project must be run with no internet access.
    This is only possible because of the free software principle discussed in Section \ref{principle:freesoftware}.
  \end{itemize}

  Note that \citet{akhlaghi19} used previously published datasets which are automatically accessed when necessary.
  Also, that paper didn't produce any output datasets beyond the figures shown in the report, therefore the Zenodo upload doesn't contain any datasets.
  When a project involves data collection, or added-value data products, they can also be uploaded with the files above.
\end{itemize}
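As a minimal sketch of how the Git ``bundle'' mentioned in the list above can be produced and later used (branch and file names are illustrative):

\begin{verbatim}
# Pack the project's full Git history into a single file.
git bundle create my-project.bundle --all

# A reader can later recover the repository from the bundle.
git clone my-project.bundle my-project
\end{verbatim}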

\subsubsection{Worries about getting scooped!}
\label{sec:scooped}
Publishing the project source with the paper can have many benefits for the researcher and the larger community.
For example, if the source is published with a pre-print, others may help the authors find bugs or improvements to the source that can affect the validity or precision of the result, or simply optimize it so that, for example, it does the same work in half the time.

However, one particular concern raised by a minority of researchers is that publishing the project's reproducible data lineage immediately after publication may hamper their ability to continue harvesting from all their hard work.
Because others can easily reproduce the work, they may pursue the follow-up projects that the original authors had intended to do next.
This is informally known as getting scooped.

The extent to which this may happen is an interesting subject to be studied once many papers become reproducible.
But it is a valid concern that must be addressed.
Given the strong integrity checks in Maneage, we believe it has features to address this problem in the following ways:

\begin{enumerate}
\item This worry is essentially the 2nd phase of Figure \ref{fig:branching}.
  The commits of the other team are built upon the commits of the original authors.
  It is therefore perfectly clear (with the precision of a character!) how much of their result is purely their own work (qualitatively or quantitatively).
  In this way, Maneage can contribute to a new concept of authorship in scientific projects and help to quantify Newton's famous ``standing on the shoulders of giants'' quote.
  However, this is a long term goal and requires major changes to academic value systems.
\item The authors can be given a grace period where the journal, or some third authority, keeps the source and publishes it a certain time after publication.
  In fact, journals can create specific policies for such scenarios, for example saying that all project sources will be available publicly, $N$ months/years after publication while allowing authors to opt-out of it if they like, so the source is published immediately with the paper.
  However, journals cannot expect exclusive copyright to distribute the project source, in the same manner they do with the final paper.
  As discussed in the free software principle of Section \ref{principle:freesoftware}, it is critical that the project source be free for the community to use, modify and distribute.

  This can also be done by the authors on servers like Zenodo, where the dataset's final DOI can be reserved first, and the upload published at a later date.
  Reproducibility is indeed very important for the sciences, but the hard work that went into a project should also be acknowledged for authors who would like to publish the source at a later date.
\end{enumerate}




\subsection{Future of Maneage and its past}
\label{sec:futurework}
As with any software, the core architecture of Maneage will inevitably evolve after the publication of this paper.
The current version introduced here has already experienced 5 years of evolution and several reincarnations.
Its primordial implementation was written for \citet{akhlaghi15}.
This paper described a new detection algorithm in astronomical image processing.
The detection algorithm was developed as the paper was being written (initially a small report!).
An automated sequence of commands to build the figures, and update the paper/report was a practical necessity as the algorithm was evolving.
In particular, it didn't just reproduce figures, it also used \LaTeX{} macros to update numbers printed within the text.
Finally, since the full analysis pipeline was in plain-text and roughly 100kb (much less than a single figure), it was uploaded to arXiv with the paper's \LaTeX{} source, under a \inlinecode{reproduce/} directory, see \href{https://arxiv.org/abs/1505.01664}{arXiv:1505.01664}\footnote{
  To download the \LaTeX{} source of any arXiv paper, click on the ``Other formats'' link, containing necessary instructions and links.}.

The system later evolved in \citet{bacon17}, in particular the two sections of that paper that were done by M. Akhlaghi (first author of this paper): \citet[\href{http://doi.org/10.5281/zenodo.1163746}{zenodo.1163746}]{akhlaghi18a} and \citet[\href{http://doi.org/10.5281/zenodo.1164774}{zenodo.1164774}]{akhlaghi18b}.
With these projects, the skeleton of the system was written as a more abstract ``template'' that could be customized for separate projects.
The template later matured by including installation of all necessary software from source and used in \citet[\href{https://doi.org/10.5281/zenodo.3408481}{zenodo.3408481}]{akhlaghi19} and \citet[\href{https://doi.org/10.5281/zenodo.3524937}{zenodo.3524937}]{infante20}.
The short historical review above highlights how this template was created by practicing scientists, and has evolved and matured significantly.

We already have roughly 30 tasks that are left for the future and will affect various high-level phases of the project as described here.
However, the core of the system has already been used and has become stable enough that we don't foresee any major change in the core methodology in the near future.
A list of the notable changes after the publication of this paper will be kept in the project's \inlinecode{README-hacking.md} file.
Once the improvements become substantial, new paper(s) will be written to complement or replace this one.




















\section{Discussion}
\label{sec:discussion}

\begin{itemize}
\item Science is defined by its method, not its results.
  Just as papers are quality checked for a reasonable English (which is not necessary for conveying the final result), the necessities of modern science require a similar check on a reasonable review of the computation, which is easiest to check when the result is exactly reproducible.
\item Initiatives such as \url{https://software.ac.uk} (UK) and \url{http://urssi.us} (USA) are good attempts at improving the quality of research software.
\item Hiring software engineers is not the solution: the language of science has changed.
  Could Galileo have created a telescope if he wasn't familiar with what a lens is?
  Science is not independent of its tools.
\item The actual processing is archived in multiple places (with the paper on arXiv, with the data on Zenodo, on a Git repository, in future versions of the project).
\item As shown by the very common use of something like Conda, software (even free software) is mainly seen in executable form, but this is wrong: even if the software source code is no longer compilable, it is still readable.
\item The software/workflow is not independent of the paper.
\item Cost of archiving is a critical issue (the NSF director mentions this during the first Panel of the National Academies meeting\footnote{\url{https://vimeo.com/367085708} (around minute 45:00)}).
\item Meta-science (or ``Science of science'', ``economics of science'', ``Research on research'') and its importance.
\item Provenance tracking (like some tools in Appendix \ref{appendix:existingsolutions}) is built-in and doesn't need any manual tagging.
\item Important that a project starts by following good practice \citep{fineberg19}, not an extra step in the end.
\item It is possible to write graphic user interface wrappers like those in Appendix \ref{appendix:existingsolutions}.
\item What is often discussed is ``taking data and applying different methods to it'', but an even more productive question can be: ``take (exact) methods and give different data to them''.
\item \citet{munafo19} discuss how collective action is necessary.
\item Research objects (Appendix \ref{appendix:researchobject}) can automatically be generated from the Makefiles; we can also apply special commenting conventions to be included as annotations/descriptions in the research object metadata.
\item Provenance between projects: through Git, all projects based on this template are automatically connected; but also through inputs/outputs, the lineage of a project can be traced back to the projects before it.
\item \citet{gibney20}: After code submission was encouraged by the Neural Information Processing Systems (NeurIPS), the frac % incomplete
\item When the data are confidential, \citet{perignon19} suggest to have a third party familiar with coding to referee the code and give its approval.
  In this system, because of automatic verification of inputs and outputs, no technical knowledge is necessary for the verification.
\item \citet{miksa19b} Machine-actionable data management plans (maDMPs) embedded in workflows, allowing
\item \citet{miksa19a} RDA recommendation on maDMPs.
\item FAIR Principles \citep{wilkinson16}.
\item \citet{cheney09}: ``In both data warehouses and curated databases, tremendous (\emph{and often manual}) effort is usually expended in the construction''
\item \url{https://arxiv.org/pdf/2001.11506.pdf}
\item Apache NiFi for automated data flow.
\item \url{https://arxiv.org/pdf/2003.04915.pdf}: how data lineage can help machine learning.
\item Interesting patent on ``documenting data lineage'': \url{https://patentimages.storage.googleapis.com/c0/51/6e/1f3af366cd73b1/US10481961.pdf}
\item Automated data lineage extractor: \url{http://hdl.handle.net/20.500.11956/110206}.
\item Caveat: Many low-level tools.
\item High-level tools can be written to exploit the low-level features.
\end{itemize}





%% Acknowledgements
\section{Acknowledgments}
The authors wish to thank Pedram Ashofteh Ardakani, Zahra Sharbaf and Surena Fatemi for their useful suggestions and feedback on Maneage and this paper.
Work on the reproducible paper template has been funded by the Japanese Ministry of Education, Culture, Sports, Science, and Technology ({\small MEXT}) scholarship and its Grant-in-Aid for Scientific Research (21244012, 24253003), the European Research Council (ERC) advanced grant 339659-MUSICOS, European Union’s Horizon 2020 research and innovation programme under Marie Sklodowska-Curie grant agreement No 721463 to the SUNDIAL ITN, and from the Spanish Ministry of Economy and Competitiveness (MINECO) under grant number AYA2016-76219-P.
The reproducible paper template was also supported by European Union’s Horizon 2020 (H2020) research and innovation programme via the RDA EU 4.0 project (ref. GA no. 777388).

%% Tell BibLaTeX to put the bibliography list here.
\printbibliography



















\newpage
\appendix
\noindent
    {\Large\bf Appendices}\\
\vspace{-5mm}
\section{Survey of existing tools for various phases}
\label{appendix:existingtools}

Conducting a reproducible research project is a high-level process, which involves using various lower-level tools.
In this section, a survey of the most commonly used lower-level tools for various aspects of a reproducible project is presented, with an introduction to each as it relates to reproducibility and the proposed template.
In particular, we focus on the tools used within the proposed template and also the tools that are used by the existing reproducible frameworks that are reviewed in Appendix \ref{appendix:existingsolutions}.
Some existing solutions for managing the different parts of a reproducible workflow are also reviewed here.



\subsection{Independent environment}
\label{appendix:independentenvironment}

There are three general ways of controlling the environment: 1) virtual machines, 2) containers, 3) a controlled build and environment.
Below, a short description of each solution is provided.

\subsubsection{Virtual machines}
\label{appendix:virtualmachines}
Virtual machines (VMs) keep a copy of a full operating system that can be run on other operating systems.
This includes the lowest-level kernel which connects to the hardware.
VMs thus provide the ultimate control one can have over the run-time environment of an analysis.
However, the VM's kernel does not talk directly to the hardware that is doing the analysis; it talks to simulated hardware that is provided by the host operating system's kernel.
Therefore, a process that is run inside a virtual machine can be much slower than one that is run on a native kernel.
VMs are used by cloud providers, enabling them to sell fully independent operating systems on their large servers to their customers (where the customer can have root access).
But because of all this overhead, they aren't often used for reproducing individual processes.

\subsubsection{Containers}
Containers are higher-level constructs that don't have their own kernel; they talk directly with the host operating system's kernel, but have their own independent software for everything else.
Therefore, they have much less overhead in storage and in hardware/CPU access.
Users often choose a conventional operating system for the container's independent environment (most commonly GNU/Linux distributions, which are free software).

Below we'll review some of the most common container solutions: Docker and Singularity.

\begin{itemize}
\item {\bf\small Docker containers:} Docker is one of the most popular tools today for keeping an independent analysis environment.
  It is primarily driven by the needs of software developers: they need to be able to reproduce a bug on the ``cloud'' (which is just a remote VM), where they have root access.
  A Docker container is composed of independent Docker ``images'' that are built with Dockerfiles.
  It is possible to precisely version/tag the images that are imported (to avoid downloading the latest/different version in a future build).
  To have a reproducible Docker image, it must be ensured that all the imported Docker images check their dependency tags down to the initial image which contains the C library.

  Another important drawback of Docker for scientific applications is that it runs as a daemon (a program that is always running in the background) with root permissions.
  This is a major security flaw that discourages many high performance computing (HPC) facilities from installing it.

\item {\bf\small Singularity:} Singularity is a single-image container (unlike Docker which is composed of modular/independent images).
  Although it needs root permissions to be installed on the system (once), it doesn't require root permissions every time it is run.
  Its main program is also not a daemon, but a normal program that can be stopped.
  These features make it much easier for HPC administrators to install than Docker.
  However, the fact that it requires root access for the initial installation is still a hindrance for a random project: if it is not already present on the HPC, the project can't be run as a normal user.

\item {\bf\small Virtualenv:} \tonote{Discuss it later.}
\end{itemize}

When the installed software within VMs or containers is precisely under control, they are good solutions to reproducibly ``running''/repeating an analysis.
However, because they store the already-built software environment, they are not good for ``studying'' the analysis (how the environment was built).
Currently, the most common practice to install software within containers is to use the package manager of the operating system within the image, usually a minimal Debian-based GNU/Linux operating system.
For example, the Dockerfile\footnote{\url{https://github.com/benmarwick/1989-excavation-report-Madjedbebe/blob/master/Dockerfile}} in the reproducible scripts of \citet{clarkso15} uses \inlinecode{sudo apt-get install r-cran-rjags -y} to install the R interface to the JAGS Bayesian statistics package (rjags).
However, the operating system package managers aren't static.
Therefore, the versions of the tools downloaded and used within the Docker image will change depending on when it was built.
At the time \citet{clarkso15} was published (June 2015), the \inlinecode{apt} command above would download and install rjags 3-15, but today (January 2020), it will install rjags 4-10.
Such problems can be corrected with robust/reproducible package managers like Nix or GNU Guix within the Docker image (see Appendix \ref{appendix:packagemanagement}), but this is rarely practiced today.

\subsubsection{Package managers}
\label{appendix:packagemanagersinenv}
The virtual machine and container solutions mentioned above install software in standard Unix locations (for example \inlinecode{/usr/bin}), but within their own independent operating systems.
But if software are built in, and used from, a non-standard, project-specific directory, we can have an independent build and run-time environment without needing root access, or the extra layers of a container or VM.
This leads us to the final method of having an independent environment: a controlled build of the software and its run-time environment.
Because this is highly intertwined with the way software are installed, we'll describe it in more detail in Appendix \ref{appendix:packagemanagement}, where package managers are reviewed.





\subsection{Package management}
\label{appendix:packagemanagement}

Package management is the process of automating the installation of software.
A package manager thus contains the following information on each software package, so that installation can run automatically: the URL of the software's tarball, the other software that it possibly depends on, and how to configure and build it.

Here, some of the package management solutions that are used by the reviewed reproducibility solutions of Appendix \ref{appendix:existingsolutions} are discussed\footnote{For a list of existing package managers, please see \url{https://en.wikipedia.org/wiki/List_of_software_package_management_systems}}.
Note that we are not including package managers that are limited to one language, for example \inlinecode{pip} (for Python) or \inlinecode{tlmgr} (for \LaTeX).



\subsubsection{Operating system's package manager}
The most commonly used package managers are those of the host operating system, for example \inlinecode{apt} or \inlinecode{yum} respectively on Debian-based, or RedHat-based GNU/Linux operating systems (among many others).

These package managers are tightly intertwined with the operating system.
Therefore they require root access, and arbitrary control (for different projects) over the versions and configuration options of the software within them is not trivial/possible: for example, a special version of a software package that may be necessary for a project may conflict with an operating system component, or with another project.
Furthermore, in many operating systems it is only possible to have one version of a software package at any moment (not including Nix or GNU Guix, which can also be independent of the operating system, described below).
Hence if two projects need different versions of a software, it is not possible to work on them at the same time.

When a full container or virtual machine (see Appendix \ref{appendix:independentenvironment}) is used for each project, it is common for projects to use the containerized operating system's package manager.
However, it is important to remember that operating system package managers are not static: software are updated on their servers.
For example, simply adding \inlinecode{apt install gcc} to a \inlinecode{Dockerfile} will install different versions of GCC based on when the Docker image is created.
Requesting a special version also doesn't fully address the problem, because the package manager also downloads and installs that package's dependencies.
Hence a fixed version of the dependencies must also be included.
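For example, even pinning an explicit version in such an installation command only fixes the top-level package, not its dependencies (the package version string below is purely hypothetical):

\begin{verbatim}
# Pinning only the top-level package; its dependencies are still
# resolved to whatever versions the repository serves at build time.
apt-get install -y gcc=4:10.2.1-1
\end{verbatim}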

In summary, these package managers are primarily meant for the operating system components.
Hence, many robust reproducible analysis solutions (reviewed in Appendix \ref{appendix:existingsolutions}) don't use the host's package manager, but an independent package manager, like the ones below.

\subsubsection{Conda/Anaconda}
\label{appendix:conda}
Conda is an independent package manager that can be used on GNU/Linux, macOS, or Windows operating systems, although not all software packages are available on all operating systems.
Conda is able to maintain an approximately independent environment on an operating system without requiring root access.

Conda tracks the dependencies of a package/environment through a YAML formatted file, where the necessary software and their acceptable versions are listed.
However, it is not possible to fix the versions of the dependencies through the YAML files alone.
This is thoroughly discussed under issue 787 (in May 2019) of \inlinecode{conda-forge}\footnote{\url{https://github.com/conda-forge/conda-forge.github.io/issues/787}}.
In that discussion, the authors of \citet{uhse19} report that the half-life of their environment (defined in a YAML file) is 3 months, and that at least one of their dependencies breaks shortly after this period.
The main reply they got in the discussion is to build the Conda environment in a container, which is also the suggested solution by \citet{gruning18}.
However, as described in Appendix \ref{appendix:independentenvironment}, containers just hide the reproducibility problem, they don't fix it: containers aren't static and need to evolve (i.e., be re-built) with the project.
Given these limitations, \citet{uhse19} are forced to host their conda-packaged software as tarballs on a separate repository.

Conda installs with a shell script that contains a binary blob (over 500 megabytes, embedded in the shell script).
This is the first major issue with Conda: from the shell script, it is not clear what is in this binary blob and what it does.
After installing Conda in any location, users can easily activate that environment by loading a special shell script into their shell.
However, the resulting environment is not fully independent of the host operating system as described below:

\begin{itemize}
\item The Conda installation directory is present at the start of environment variables like \inlinecode{PATH} (which is used to find programs to run) and other such environment variables.
  However, the host operating system's directories are also appended afterwards.
  Therefore, a user, or script may not notice that a software that is being used is actually coming from the operating system, not the controlled Conda installation.

\item Generally, by default Conda relies heavily on the operating system and doesn't include core analysis components like \inlinecode{mkdir}, \inlinecode{ls} or \inlinecode{cp}.
  Although they are generally the same between different Unix-like operating systems, they have their differences.
  For example \inlinecode{mkdir -p} is a common way to build directories, but this option is only available with GNU Coreutils (default on GNU/Linux systems).
  Running the same command within a Conda environment on macOS, for example, will crash.
  Important packages like GNU Coreutils are available in channels like conda-forge, but they are not the default.
  Therefore, many users may not recognize this, and failing to account for it will cause unexpected crashes.

\item Many major Conda packaging ``channels'' (for example the core Anaconda channel, or very popular conda-forge channel) don't include the C library, that a package was built with, as a dependency.
  They rely on the host operating system's C library.
  C is the core language of most modern operating systems and even higher-level languages like Python or R are written in it, and need it to run.
  Therefore if the host operating system's C library is different from the C library that a package was built with, a Conda-packaged program will crash and the project will not be executable.
  Theoretically, it is possible to define a new Conda ``channel'' which includes the C library as a dependency of its software packages, but it will take too much time for any individual team to practically implement all their necessary packages, up to their high-level science software.

\item Conda does allow a package to depend on a special build of its prerequisites (specified by a checksum, fixing its version and the version of its dependencies).
  However, this is rarely practiced in the main Git repositories of channels like Anaconda and conda-forge: only the names of the high-level prerequisite packages are listed in a package's \inlinecode{meta.yaml} file, which is version-controlled.
  Therefore two builds of the package from the same Git repository will result in different tarballs (depending on what prerequisites were present at build time).
  In the Conda tarball (that contains the binaries and is not under version control) \inlinecode{meta.yaml} does include the exact versions of most build-time dependencies.
  However, because the different software of one project may have been built at different times, if they depend on different versions of a single software there will be a conflict and the tarball can't be rebuilt, or the project can't be run.
\end{itemize}

As reviewed above, the low-level dependence of Conda on the host operating system's components and build-time conditions, is the primary reason that it is very fast to install (thus making it an attractive tool to software developers who just need to reproduce a bug in a few minutes).
However, these same factors are major caveats in a scientific scenario, where long-term archivability, readability or usability are important. % alternative to `archivability`?



\subsubsection{Nix or GNU Guix}
\label{appendix:nixguix}
Nix \citep{dolstra04} and GNU Guix \citep{courtes15} are independent package managers that can be installed and used on GNU/Linux operating systems, and macOS (only for Nix, prior to macOS Catalina).
Both also have a fully functioning operating system based on their packages: NixOS and ``Guix System''.
GNU Guix is based on Nix, so we'll focus the review here on Nix.

The Nix approach to package management is unique in that it allows exact tracking of all dependencies, and allows multiple versions of a software package to co-exist; for more details see \citet{dolstra04}.
In summary, a unique hash is created from all the components that go into the building of the package.
That hash is then prefixed to the software's installation directory.
For example \citep[from][]{dolstra04} if a certain build of GNU C Library 2.3.2 has a hash of \inlinecode{8d013ea878d0}, then it is installed under \inlinecode{/nix/store/8d013ea878d0-glibc-2.3.2} and all software that are compiled with it (and thus need it to run) will link to this unique address.
This allows for multiple versions of the software to co-exist on the system, while keeping an accurate dependency tree.

As mentioned in \citet{courtes15}, one major caveat with using these package managers is that they require a daemon with root privileges.
This is necessary ``to use the Linux kernel container facilities that allow it to isolate build processes and maximize build reproducibility''.

\tonote{While inspecting the Guix build instructions for some software, I noticed they don't actually mention the version names. This creates a similar issue withe Conda example above (how to regenerate the software with a given hash, given that its dependency versions aren't explicitly mentioned. Ask Ludo' about this.}


\subsubsection{Spack}
Spack is a package manager that is also influenced by Nix (similar to GNU Guix), see \citet{gamblin15}.
  But unlike Nix or GNU Guix, it doesn't aim for full, bit-wise reproducibility and can be built without root access in any generic location.
  It relies on the host operating system for the C library.

  Spack is fully written in Python, where each software package is an instance of a class, which defines how it should be downloaded, configured, built and installed.
  Therefore if the proper version of Python is not present, Spack cannot be used and when incompatibilities arise in future versions of Python (similar to how Python 3 is not compatible with Python 2), software building recipes, or the whole system, have to be upgraded.
  Because of such bootstrapping problems (for example how Spack needs Python to build Python and other software), it is generally a good practice to use simpler, lower-level languages/systems for a low-level operation like package management.


\subsection{Package management conclusion}
There are some common issues regarding generic package managers that hinder their usage for high-level scientific projects, as listed below:
\begin{itemize}
\item {\bf\small Pre-compiled/binary downloads:} Most package managers (excluding Nix or its derivatives) only download the software in a binary (pre-compiled) format.
  This allows users to download it very fast and almost instantaneously be able to run it.
  However, to provide for this, servers need to keep binary files for each build of the software on different operating systems (for example Conda needs to keep binaries for Windows, macOS and GNU/Linux operating systems).
  It is also necessary for them to store binaries for each build, which includes different versions of its dependencies.
  This takes major space on the servers; therefore, once the shelf-life of a binary has expired, it will not be easy to reproduce a project that depends on it.

  For example Debian's Long Term Support is only valid for 5 years.
  Pre-built binaries of the ``Stable'' branch will only be kept during this period and this branch only gets updated once every two years.
  However, scientific software commonly evolves at a much faster rate.
  Therefore scientific projects using Debian often use the ``Testing'' branch, which has more up-to-date features.
  The problem is that binaries on the Testing branch are removed as soon as a newer version is available and no other package depends on them.
  This is not limited to operating systems; similar problems are also reported in Conda (see the discussion of Conda above for a real-world example).


\item {\bf\small Adding high-level software:} Packaging new software is not trivial and needs a good level of knowledge/experience with that package manager.
For example, each has its own special syntax/standards/languages, with pre-defined variables that must already be known to someone packaging new software.
However, in many scenarios, the highest-level software of a research project is written and used only by the team that is doing the research, even when it is distributed with free licenses on open repositories.
Although active package manager communities are commonly very supportive in helping to package new software, many teams may not take that extra effort/time.
They will thus manually install their high-level software in an uncontrolled, or non-standard way, thus jeopardizing the reproducibility of the whole work.

\item {\bf\small Built for a generic scenario:} All the package managers above are built for one full system that can possibly be run by multiple projects.
  This can result in the build process of each package not being fully documented (for example, the versions of a package's dependent libraries).
\end{itemize}

Addressing these issues has been the basic raison d'\^etre of the proposed template's approach to package management: instructions to download and build the packages are included within the actual science project (thus fully customizable) and no special/new syntax/language is used; software downloading, building and installation are done with the same languages/syntax that researchers use to manage their research: the shell (GNU Bash) and Make (GNU Make).
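
As a minimal sketch of this approach (the URL, version and installation prefix below are hypothetical), downloading and building one dependency uses only shell commands that researchers already know:

\begin{verbatim}
# Minimal sketch: build one dependency with plain shell commands.
# The URL, version and prefix below are hypothetical examples.
version=1.2.3
prefix=$HOME/project/software
wget https://example.org/src/mytool-$version.tar.gz
tar -xf mytool-$version.tar.gz
cd mytool-$version
./configure --prefix=$prefix
make && make install
\end{verbatim}

In the proposed template, such commands are simply placed inside Make rules (see Appendix \ref{appendix:make}), so a package is only re-built when its inputs change.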



\subsection{Version control}
\label{appendix:versioncontrol}
A scientific project is not written in a day.
It commonly takes more than a year (for example a PhD project is 3 or 4 years).
During this time, the project evolves significantly from its first starting date and components are added or updated constantly as it approaches completion.
Combined with the complexity of modern projects, it is not trivial to manually track this evolution and its effect on the final output: files produced in one stage of the project may be used at later stages (where the project has evolved).
Furthermore, scientific projects do not progress linearly: earlier stages of the analysis are often modified after later stages are written.
This is a natural consequence of the scientific method, where progress is defined by experimentation and modification of hypotheses (earlier phases).

It is thus very important for the integrity of a scientific project that the state/version of its processing is recorded as the project evolves (for example as better methods are found or more data arrive).
Any intermediate dataset that is produced should also be tagged with the version of the project at the time it was created.
In this way, later processing stages can make sure that an intermediate dataset can safely be used, i.e., that no change has been made in the steps that produced it.

Solutions to keep track of a project's history have existed since the early days of software engineering in the 1970s and they have constantly improved over the last decades.
Today the distributed model of ``version control'' is the most common, where the full history of the project is stored locally on different systems and can easily be integrated.
There are many existing version control solutions, for example CVS, SVN, Mercurial, GNU Bazaar, or GNU Arch.
However, Git is currently by far the most commonly used, both in individual projects and in long-term archival systems like Software Heritage \citep{dicosmo18}. It is also the system used in the proposed template, so we will only review it here.

\subsubsection{Git}
With Git, changes in a project's contents are accurately identified by comparing them with their previous version in the archived Git repository.
When the user decides the changes are significant compared to the archived state, they can ``commit'' the changes into the history/repository.
The commit involves copying the changed files into the repository and calculating a 40-character checksum/hash from the files, an accompanying ``message'' (a narrative description of the project's state), and the previous commit (thus creating a ``chain'' of commits that are strongly connected to each other).
For example \inlinecode{f4953cc\-f1ca8a\-33616ad\-602ddf\-4cd189\-c2eff97b} is a commit identifier in the Git history that this paper is being written in.
Commits are commonly summarized by the checksum's first few characters, for example \inlinecode{f4953cc}.
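
In practice, recording a change and referring to it by its abbreviated checksum looks like the following minimal sketch (the file name and commit message are hypothetical; the hash shown is the example from above):

\begin{verbatim}
$ git add paper.tex        # stage the edited file
$ git commit -m "Describe the new figure in the results section"
$ git log --oneline -1     # show the abbreviated commit hash
f4953cc Describe the new figure in the results section
\end{verbatim}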

With Git, making parallel ``branches'' (in the project's history) is very easy and its distributed nature greatly helps in the parallel development of a project by a team.
The team can host the Git history on a webpage and collaborate through that.
There are several Git hosting services for example \href{http://github.com}{github.com}, \href{http://gitlab.com}{gitlab.com}, or \href{http://bitbucket.org}{bitbucket.org} (among many others).





\subsection{Job management}
\label{appendix:jobmanagement}
Any analysis will involve more than one logical step.
For example it is first necessary to download a dataset, then to do some preparations on it, then to actually use it, and finally to make visualizations/tables that can be imported into the final report.
Each one of these is a logically independent step which needs to be run before/after the others in a specific order.
There are many tools for managing the sequence of jobs; below we review the most common ones, which are also used in the proposed template or in the existing reproducibility solutions of Appendix \ref{appendix:existingsolutions}.

\subsubsection{Scripts}
\label{appendix:scripts}
Scripts (in any language, for example GNU Bash, or Python) are the most common ways of organizing a series of steps.
They are primarily designed to execute each step sequentially (one after another), making them also very intuitive.
However, as the series of operations grows large and complex, managing the workflow in a script becomes difficult.
For example if 90\% of a long project is already done and a researcher wants to add a followup step, a script will go through all the previous steps (which can take significant time).
Also, if a small step in the middle of an analysis has to be changed, the full analysis needs to be re-run: scripts have no concept of dependencies, so they cannot run only the steps that are affected by that change.
Such factors discourage experimentation, which is a critical component of the scientific method.
It is possible to manually add conditionals all over the script to mimic dependencies, but these just make it harder to read and can introduce bugs of their own.
Parallelization is another drawback of scripts.
While not impossible, because of the high-level nature of scripts it is not trivial, and a parallelized script can easily be inefficient or buggy.
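
The minimal sketch below (with hypothetical step and file names) illustrates the core problem: every invocation re-runs all the steps, even when only the last one has changed.

\begin{verbatim}
#!/bin/bash
# Minimal sketch of a linear analysis script; the step names
# and file names are hypothetical. Every run repeats all steps.
set -e                                  # stop at the first error
./download.sh raw.dat                   # step 1: obtain the data
./prepare.sh  raw.dat     clean.dat     # step 2: clean/prepare it
./analyze.sh  clean.dat   result.dat    # step 3: main analysis
./plot.sh     result.dat  figure.pdf    # step 4: visualization
\end{verbatim}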


\subsubsection{Make}
\label{appendix:make}
Make was originally designed to address the problems mentioned in Appendix \ref{appendix:scripts} for scripts \citep{feldman79}.
In particular this motivation arose from management issues related to program compilation with many source code files.
With Make, the source files of a program that have not changed are not recompiled.
Also, when two source files do not depend on each other and both need to be rebuilt, they can be built in parallel.
This greatly helped in debugging software projects and in speeding up test builds, giving Make a core place among software building tools ever since.
The most common implementation of Make, since the early 1990s, is GNU Make \citep[\url{http://www.gnu.org/s/make}]{stallman88}.
The proposed solution uses Make to organize its workflow, see Section \ref{sec:usingmake}.
Here, we'll complement that section with more technical details on Make.

Usually, the top-level Make instructions are placed in a file called Makefile, but it is also common to use the \inlinecode{.mk} suffix for custom file names.
Each stage/step in the analysis is defined through a \emph{rule}.
Rules define \emph{recipes} to build \emph{targets} from \emph{pre-requisites}.
In POSIX operating systems (Unix-like), everything is a file, even directories and devices.
Therefore the targets and prerequisites of a rule must be files on the running filesystem.
Figure \ref{fig:makeexample} demonstrates a hypothetical Makefile with the targets, prerequisites and recipes highlighted.

\begin{figure}[t]
  {\small
  \texttt{\mkcomment{\# The ultimate "target" of this Makefile is 'ultimate.txt' (the first target Make finds).}}

  \texttt{\mktarget{ultimate.txt}: out.txt\hfill\mkcomment{\# 'ultimate.txt' depends on 'out.txt'.{ }{ }{ }{ }{ }}}

  \texttt{\mktab{}awk '\$1\textless5' out.txt \textgreater{ }\mktarget{ultimate.txt}\hfill\mkcomment{\# Only rows with 1st column less than 5.{ }{ }{ }}}

  \vspace{1em}
  \texttt{\mkcomment{\# But 'out.txt', is created by a Python script, and 'params.conf' keeps its configuration.}}

  \texttt{\mktarget{out.txt}: run.py params.conf}

  \texttt{\mktab{}python run.py --in=params.conf --out=\mktarget{out.txt}}
  }

  \caption{\label{fig:makeexample}An example Makefile that describes how to build \inlinecode{ultimate.txt} with two \emph{rules}.
    \emph{targets} (blue) are placed before the colon (\texttt{:}).
    \emph{prerequisites} (green) are placed after the colon.
    The \emph{recipe} to build the targets from the prerequisites is placed after a \texttt{TAB}.
    The final target is the first one that Make encounters (\inlinecode{ultimate.txt}).
    It depends on the output of a Python program (\inlinecode{run.py}), which is configured by \inlinecode{params.conf}.
    Anytime \inlinecode{run.py} or \inlinecode{params.conf} are edited/updated, \inlinecode{out.txt} is re-created and thus \inlinecode{ultimate.txt} is also re-created.
    }
\end{figure}

To decide which operation should be re-done when executed, Make compares the time stamp of the targets and prerequisites.
When any of the prerequisite(s) is newer than a target, the recipe is re-run to re-build the target.
When all the prerequisites are older than the target, that target doesn't need to be rebuilt.
The recipe can contain any number of commands; each must simply start with a \inlinecode{TAB}.
Going deeper into the syntax of Make is beyond the scope of this paper, but we recommend that interested readers consult the GNU Make manual\footnote{\url{http://www.gnu.org/software/make/manual/make.pdf}}.
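
Assuming the hypothetical Makefile of Figure \ref{fig:makeexample} is in the current directory, an interactive session could look like the sketch below (annotations are given as comments; the exact messages printed by Make are not shown).

\begin{verbatim}
$ make               # first run: builds out.txt, then ultimate.txt
$ make               # nothing changed, so nothing is re-built
$ touch params.conf  # simulate editing the configuration file
$ make               # out.txt is now older than params.conf, so
                     # both out.txt and ultimate.txt are re-built
\end{verbatim}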

\subsubsection{SCons}
SCons (\url{https://scons.org}) is a Python package for managing operations outside of Python (in contrast to CGAT-core, discussed below, which only organizes Python functions).
In many aspects it is similar to Make, for example it is managed through a `SConstruct' file.
Like a Makefile, SConstruct is also declarative: the running order is not necessarily the top-to-bottom order of the written operations within the file (unlike the imperative paradigm which is common in languages like C, Python, or FORTRAN).
However, unlike Make, SCons doesn't use file modification dates to decide what should be rebuilt.
SCons keeps the MD5 hash of all the files (in a hidden binary file) to check if their contents have changed.

SCons thus attempts to work on a declarative file with an imperative language (Python).
It also goes beyond raw job management and attempts to extract information from within the files (for example to identify the libraries that must be linked while compiling a program).
SCons is therefore more complex than Make: its manual is almost double that of GNU Make.
Besides added complexity, all these ``smart'' features decrease its performance, especially as files get larger and more numerous: on every call, every file's checksum has to be calculated, and a Python system call has to be made (which is computationally expensive).

Finally, it has the same drawback as any other tool that uses high-level languages, see Section \ref{appendix:highlevelinworkflow}.
We encountered such a problem while testing SCons: on the Debian-10 testing system, the \inlinecode{python} program pointed to Python 2.
However, since Python 2 is now obsolete, SCons was built with Python 3 and our first run crashed.
To fix it, we had to either manually change the core operating system path, or the SCons source hashbang.
The former would conflict with other system tools that assume \inlinecode{python} points to Python 2; the latter may need root permissions on some systems.
This can also be problematic when a Python analysis library requires a Python version that conflicts with the one running SCons.

\subsubsection{CGAT-core}
CGAT-Core (\url{https://cgat-core.readthedocs.io/en/latest}) is a Python package for managing workflows, see \citet{cribbs19}.
It wraps analysis steps in Python functions and uses Python decorators to track the dependencies between tasks.
It is used in papers like \citet{jones19}, where, as mentioned there, it is well suited to managing individual outputs (for example separate figures/tables of the paper) when they are fully created within Python.
Because it is primarily designed for Python tasks, managing a full workflow (which includes many more components, written in other languages) is not trivial in it.
Another drawback of this workflow manager is that Python is a very high-level language: future versions of the language may no longer be compatible with Python 3, in which CGAT-core is implemented (just as Python 2 programs are not compatible with Python 3).

\subsubsection{Guix Workflow Language (GWL)}
GWL (\url{https://www.guixwl.org}) is based on the declarative language that GNU Guix uses for package management (see Appendix \ref{appendix:packagemanagement}), which is itself based on the general-purpose Scheme language.
It is closely linked with GNU Guix and can even install the necessary software needed for each individual process.
Hence in the GWL paradigm, software installation and usage don't have to be separated.
GWL has two high-level concepts called ``processes'' and ``workflows'' where the latter defines how multiple processes should be executed together.

As described above, shell scripts and Make are common and widely used systems that have existed for several decades, and many researchers are already familiar with them.
The list of necessary software solutions for the various stages of a research project (listed in the subsections of Appendix \ref{appendix:existingtools}) is already very large, and each tool has its own learning curve (which is a heavy burden for a natural or social scientist, for example).
The other workflow management tools are tied to a specific paradigm: for example, CGAT-core is written for Python and GWL is intertwined with GNU Guix.
Therefore generalizing them to any kind of problem is not trivial.

Also, high-level and specific solutions will evolve very fast; for example, the Popper solution to reproducible research (see Appendix \ref{appendix:popper}) organized its workflow through the HashiCorp configuration language (HCL) because it was the default in GitHub Actions.
However, in September 2019, GitHub dropped HCL as its default configuration language and is now using its own custom YAML-based language.
Using such high-level or provider-specific solutions also makes them hard, or impossible, to use on any generic system.
Therefore a robust project would avoid designing its low-level processing steps in these languages and only use them for the highest-level layer, depending on which provider the project is to be run on.



\subsection{Editing steps and viewing results}
\label{appendix:editors}
In order to later reproduce a project, the analysis steps must be stored in files.
For example Shell, Python or R scripts, Makefiles, Dockerfiles, or even the source files of compiled languages like C or FORTRAN.
Given that a scientific project does not evolve linearly and many edits are needed as it evolves, it is important to be able to actively test the analysis steps while writing the project's source files.
Here we'll review some common methods that are currently used.

\subsubsection{Text editors}
The most basic way to edit text files is through simple text editors which just allow viewing and editing such files, for example \inlinecode{gedit} on the GNOME graphic user interface.
However, working with simple plain-text editors like \inlinecode{gedit} can be very frustrating, since it is necessary to save the file, then go to a terminal emulator and execute the source files.
To solve this problem there are advanced text editors like GNU Emacs that allow direct execution of the script, or access to a terminal within the text editor.
However, editors that can execute or debug the source (like GNU Emacs) simply run external programs for these jobs (for example GNU GCC or GNU GDB), just as if those programs were called from outside the editor.

With text editors, the final edited file is independent of the actual editor and can be further edited with another editor, or executed without it.
This is a very important feature that is not commonly present for other solutions mentioned below.
Another very important advantage of advanced text editors like GNU Emacs or Vi(m) is that they can also be run without a graphic user interface, directly on the command-line.
This feature is critical when working on remote systems, in particular high performance computing (HPC) facilities that don't provide a graphic user interface.

\subsubsection{Integrated Development Environments (IDEs)}
To facilitate the development of source files, IDEs add software building and running environments as well as debugging tools to a plain text editor.
Many IDEs have their own compilers and debuggers, hence source files that are maintained in IDEs are not necessarily usable/portable on other systems.
Furthermore, they usually require a graphic user interface to run.
In summary, IDEs are generally very specialized tools for special projects and are not a good solution when portability (the ability to run on different systems) is required.

\subsubsection{Jupyter}
Jupyter \citep[initially IPython,][]{kluyver16} is an implementation of Literate Programming \citep{knuth84}.
The main user interface is a web-based ``notebook'' that contains blobs of executable code and narrative.
Jupyter uses the custom built \inlinecode{.ipynb} format\footnote{\url{https://nbformat.readthedocs.io/en/latest}}.
Jupyter's name is a combination of the three main languages it was designed for: Julia, Python and R.
The \inlinecode{.ipynb} format is a simple, human-readable file (it can be opened in a plain-text editor), formatted in JavaScript Object Notation (JSON).
It contains various kinds of ``cells'', or blobs, that can contain narrative description, code, or multi-media visualizations (for example images/plots), all stored in one file.
The cells can have any order, allowing the creation of a literate-programming style graphical implementation, where narrative descriptions and executable blocks of code can be intertwined.
For example, a paragraph of text can describe a block of code, and that block can be run immediately on the same page.

The \inlinecode{.ipynb} format does theoretically allow dependency tracking between cells, see IPython mailing list (discussion started by Gabriel Becker from July 2013\footnote{\url{https://mail.python.org/pipermail/ipython-dev/2013-July/010725.html}}).
Defining dependencies between the cells can allow non-linear execution which is critical for large scale (thousands of files) and complex (many dependencies between the cells) operations.
It allows automation, run-time optimization (deciding not to run a cell if it is not necessary) and parallelization.
However, Jupyter currently only supports a linear run of the cells: always from the start to the end.
It is possible to manually execute only one cell, but the previous/next cells that may depend on it, also have to be manually run (a common source of human error, and frustration for complex operations).
Integration of directional graph features (dependencies between the cells) into Jupyter has been discussed, but as of this publication, there is no plan to implement it (see Jupyter's GitHub issue 1175\footnote{\url{https://github.com/jupyter/notebook/issues/1175}}).

The fact that the \inlinecode{.ipynb} format stores narrative text, code and multi-media visualization of the outputs in one file is another major hurdle:
the files can easily become very large (in volume/bytes) and hard to read from source.
Both issues are critical for scientific processing, especially the latter: a web browser with the proper JavaScript features may not be available in a few years.
This is further exacerbated by the fact that binary data (for example images) are not directly supported in JSON and have to be converted into much less memory-efficient textual encodings.

Finally, Jupyter has an extremely complex dependency graph: on a clean Debian 10 system, Pip (a Python package manager that is necessary for installing Jupyter) required 19 dependencies to install, and installing Jupyter within Pip needed 41 dependencies!
\citet{hinsen15} reported such conflicts when building Jupyter into the Active Papers framework (see Appendix \ref{appendix:activepapers}).
However, the dependencies above are only on the server-side.
Since Jupyter is a web-based system, it requires many dependencies on the viewing/running browser also (for example special JavaScript or HTML5 features, which evolve very fast).
As discussed in Appendix \ref{appendix:highlevelinworkflow} having so many dependencies is a major caveat for any system regarding scientific/long-term reproducibility (as opposed to industrial/immediate reproducibility).
In summary, Jupyter is most useful for manual, interactive and graphical operations of a temporary nature (for example educational tutorials).






\subsection{Project management in high-level languages}
\label{appendix:highlevelinworkflow}

Currently the most popular high-level data analysis language is Python.
R is closely tracking it, and has superseded Python in some fields, while Julia \citep[with its much better performance compared to R and Python, in a high-level structure, see][]{bezanson17} is quickly gaining ground.
These languages have themselves superseded the popular data analysis languages of previous decades, for example Java, Perl or C++.
All are part of the C-family programming languages.
In many cases, this means that the tools to use that language are written in C, which is the language of the operating system.

Scientists, or data analysts, mostly use these higher-level languages.
Therefore they are naturally drawn to also apply the higher-level languages for lower-level project management, or designing the various stages of their workflow.
For example Conda or Spack (Appendix \ref{appendix:packagemanagement}), CGAT-core (Appendix \ref{appendix:jobmanagement}), Jupyter (Appendix \ref{appendix:editors}) or Popper (Appendix \ref{appendix:popper}) are written in Python.
The discussion below applies to both the actual analysis software and the project management software; in this context, it is more focused on the latter.

Because of their nature, higher-level languages evolve very fast, creating incompatibilities on the way.
The most prominent example is the transition from Python 2 (released in 2000) to Python 3 (released in 2008).
Python 3 was incompatible with Python 2, and it was decided to abandon Python 2 by 2015.
However, due to community pressure, this was delayed to January 1st, 2020.
The end-of-life of Python 2 caused many problems for projects that had invested heavily in Python 2: all their previous work had to be translated; for example see \citet{jenness17} or Appendix \ref{appendix:sciunit}.
Some projects couldn't make this investment and their developers decided to stop maintaining them, for example VisTrails (see Appendix \ref{appendix:vistrails}).

The problems weren't just limited to translation.
Python 2 was still being actively used during the transition period (and is still being used by some, after its end-of-life).
Therefore, developers of packages used by others had to maintain (for example fix bugs in) both versions in one package.
This isn't particular to Python; a similar evolution occurred in Perl: in 2000 it was decided to improve Perl 5, but the proposed Perl 6 was incompatible with it.
However, the Perl community decided not to abandon Perl 5, and Perl 6 was eventually defined as a new language that is now officially called ``Raku'' (\url{https://raku.org}).

It is unreasonably optimistic to assume that high-level languages won't undergo similar incompatible evolutions in the (not too distant) future.
For software developers, this isn't a problem at all: non-scientific software, and the general population's usage of it, evolves extremely fast and it is rarely (if ever) necessary to look into code that is more than a couple of years old.
However, in the sciences (which are commonly funded by public money) this is a major caveat for the longer-term usability of solutions that are designed.

In summary, in this section we are discussing the bootstrapping problem as regards scientific projects: the workflow/pipeline can reproduce the analysis and its dependencies, but the dependencies of the workflow itself cannot be ignored.
The most robust way to address this problem is with a workflow management system that ideally doesn't need any major dependencies: tools that are already part of the operating system.

Beyond the technical, low-level problems for developers mentioned above, this causes major problems for scientific project management, as listed below:

\subsubsection{Dependency hell}
The evolution of high-level languages is extremely fast, even within one version.
For example packages that are written in Python 3 often only work with a special interval of Python 3 versions (for example newer than Python 3.6).
This isn't just limited to the core language; even faster changes occur in its higher-level libraries.
For example version 1.9 of Numpy (Python's numerical analysis module) discontinued support for Numpy's predecessor (called Numeric), causing many problems for scientific users \citep[see][]{hinsen15}.

On the other hand, the dependency graph of tools written in high-level languages is often extremely complex.
For example, see Figure 1 of \citet{alliez19}, which shows the dependencies and their inter-dependencies for Matplotlib (a popular plotting module in Python).

The acceptable version intervals of the dependencies will cause incompatibilities in a year or two when a robust package manager is not used (see Appendix \ref{appendix:packagemanagement}).
Since a domain scientist doesn't always have the resources/knowledge to modify the conflicting part(s), many are forced to create complex environments with different versions of Python and pass the data between them (for example just to use the work of a previous PhD student in the team).
This greatly increases the complexity of the project, even for the principal author.
A good reproducible workflow can account for these different versions.
However, when the actual workflow system (not the analysis software) is written in a high-level language this will cause a major problem.
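
The sketch below shows one common way such conflicts are worked around in practice (the package and script names are hypothetical): separate virtual environments are created for the incompatible components, and data must then be passed between them through intermediate files.

\begin{verbatim}
# Hypothetical work-around for two packages that need
# incompatible Numpy versions: one environment per package.
python3 -m venv env-a && ./env-a/bin/pip install package-a
python3 -m venv env-b && ./env-b/bin/pip install package-b
./env-a/bin/python step1.py > intermediate.dat   # first step
./env-b/bin/python step2.py < intermediate.dat   # second step
\end{verbatim}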

For example, merely installing the Python package installer (\inlinecode{pip}) on a Debian system (with \inlinecode{apt install pip2} for Python 2 packages) required 32 other packages as dependencies.
\inlinecode{pip} is necessary to install Popper and Sciunit (Appendices \ref{appendix:popper} and \ref{appendix:sciunit}).
As of this writing, the \inlinecode{pip3 install popper} and \inlinecode{pip2 install sciunit2} commands for installing each required 17 and 26 Python modules as dependencies, respectively.
It is impossible to run either of these solutions if there is a single conflict in this very complex dependency graph.
This problem actually occurred while we were testing Sciunit: even though it installed successfully, it couldn't run because of conflicts (despite its last commit being only 1.5 years old); for more, see Appendix \ref{appendix:sciunit}.
\citet{hinsen15} also report a similar problem when attempting to install Jupyter (see Appendix \ref{appendix:editors}).
Of course, this also applies to tools that these systems use, for example Conda (which is also written in Python, see Appendix \ref{appendix:packagemanagement}).





\subsubsection{Generational gap}
This occurs primarily for domain scientists (for example astronomers, biologists or social scientists).
Once they have mastered one version of a language (mostly in the early stages of their career), they tend to ignore newer versions/languages.
The inertia of programming languages is very strong.
This is natural, because they have their own science field to focus on, and re-writing their very high-level analysis toolkits (which they have curated over their career and is often only readable/usable by themselves) in newer languages requires too much investment and time.

When this investment is not possible, either the mentee has to use the mentor's old method (and miss out on all the new tools, which they need for their future job prospects), or the mentor has to avoid implementation details in discussions with the mentee, because they don't share a common language.
The authors of this paper have personal experiences in both mentor/mentee relational scenarios.
This failure to communicate in the details is a very serious problem, leading to the loss of valuable inter-generational experience.




















\section{Survey of common existing reproducible workflows}
\label{appendix:existingsolutions}

As reviewed in the introduction (Section \ref{sec:introduction}), the problem of reproducibility has received a lot of attention over the last three decades and various solutions have already been proposed.
In this appendix, some of the solutions are reviewed.
The solutions are based on an evolving software landscape, therefore they are ordered by date\footnote{When the project has a webpage, the year of its first release is used, otherwise their paper's publication year is used.}.
For each solution, we summarize its methodology and discuss how it relates to the principles in Section \ref{sec:principles}.
Freedom of the software/method is a core concept behind scientific reproducibility, as opposed to industrial reproducibility where a black box is acceptable/desirable.
Therefore proprietary solutions like Code Ocean (\url{https://codeocean.com}) or Nextjournal (\url{https://nextjournal.com}) will not be reviewed here.

\citet{konkol20} have also reviewed some of these tools from various points of view.




\subsection{Reproducible Electronic Documents, RED (1992)}
\label{appendix:red}

Reproducible Electronic Documents (\url{http://sep.stanford.edu/doku.php?id=sep:research:reproducible}) is the first attempt that we could find on doing reproducible research \citep{claerbout1992,schwab2000}.
It was developed within the Stanford Exploration Project (SEP) for Geophysics publications.
Their introduction of the importance of reproducibility resonates strongly with today's environment in the computational sciences.
In particular, they emphasize the heavy investment one has to make in order to re-do another scientist's work, even within the same team.
RED also influenced other early reproducible works, for example \citet{buckheit1995}.

To orchestrate the various figures/results of a project, from 1990 they used ``Cake'' \citep{somogyi87}, a dialect of Make (for more on Make, see Appendix \ref{appendix:jobmanagement}).
As described in \citet{schwab2000}, in the latter half of that decade they moved to GNU Make \citep{stallman88}, which was much more commonly used and actively developed, and came with a complete and up-to-date manual.
The basic idea behind RED's solution was to organize the analysis as independent steps, including the generation of plots, and organizing the steps through a Makefile.
This enabled all the results to be re-executed with a single command.
Several basic low-level Makefiles were included in the high-level/central Makefile.
The reader/user of a project had to manually edit the central Makefile and set the variable \inlinecode{RESDIR} (result directory): the directory where built files are kept.
Afterwards, the reader could choose which figures/parts of the project to reproduce by manually adding their names to the central Makefile and running Make.

At the time, Make was already practiced by individual researchers and projects as a job orchestration tool, but SEP's innovation was to standardize it as an internal policy, and define conventions for the Makefiles to be consistent across projects.
This enabled new members to benefit from the already existing work of previous team members (who had graduated or moved to other jobs).
However, RED only used the existing software of the host system; it had no means to control it.
Therefore, with wider adoption, they confronted a ``versioning problem'' where the host's analysis software had different versions on different hosts, creating different results, or crashing \citep{fomel09}.
Hence in 2006 SEP moved to a new Python-based framework called Madagascar, see Appendix \ref{appendix:madagascar}.





\subsection{Apache Taverna (2003)}
\label{appendix:taverna}
Apache Taverna (\url{https://taverna.incubator.apache.org}) is a workflow management system written in Java with a graphical user interface, see \citet[still being actively developed]{oinn04}.
A workflow is defined as a directed graph, where nodes are called ``processors''.
Each Processor transforms a set of inputs into a set of outputs and they are defined in the Scufl language (an XML-based language, where each step is an atomic task).
Other components of the workflow are ``Data links'' and ``Coordination constraints''.
The main user interface is graphical, where users place processors in a sheet and define links between their inputs and outputs.
\citet{zhao12} have studied the problem of workflow decays in Taverna.
In many aspects Taverna is like VisTrails, see Appendix \ref{appendix:vistrails}. \tonote{Since Kepler is older, it may be better to bring the VisTrails features here.}





\subsection{Madagascar (2003)}
\label{appendix:madagascar}
Madagascar (\url{http://ahay.org}) is a set of extensions to the SCons job management tool \citep{fomel13}.
For more on SCons, see Appendix \ref{appendix:jobmanagement}.
Madagascar is a continuation of the Reproducible Electronic Documents (RED) project that was discussed in Appendix \ref{appendix:red}.

Madagascar does include project management tools in the form of SCons extensions.
However, it isn't just a reproducible project management tool; it is primarily a collection of analysis programs, tools to interact with RSF files, and plotting facilities.
For example in our test of Madagascar 3.0.1, it installed 855 Madagascar-specific analysis programs (\inlinecode{PREFIX/bin/sf*}).
The analysis programs mostly target geophysical data analysis, including various project-specific tools: more than half of the total built tools are under the \inlinecode{build/user} directory, which is organized by the names of Madagascar users.
Following the Unix spirit of modularized programs that communicate through text-based pipes, Madagascar's core is the custom Regularly Sampled File (RSF) format\footnote{\url{http://www.ahay.org/wiki/Guide\_to\_RSF\_file\_format}}.
RSF is a plain-text file that points to the location of the actual data files on the filesystem, but it can also keep the raw binary dataset within the same plain-text file.

Besides the location or contents of the data, RSF also contains name/value pairs that can be used as options to Madagascar programs, which are built with inputs and outputs of this format.
Since RSF contains program options also, the inputs and outputs of Madagascar's analysis programs are read from, and written to, standard input and standard output.

Madagascar has been used in the production of hundreds of research papers or book chapters\footnote{\url{http://www.ahay.org/wiki/Reproducible_Documents}} \citep[120 prior to][]{fomel13}.


\subsection{GenePattern (2004)}
\label{appendix:genepattern}
GenePattern (\url{https://www.genepattern.org}) is a client-server software system containing many common analysis functions/modules, primarily focused on gene studies \citep[first released in 2004]{reich06}.
Although it is highly focused on a special research field, it is reviewed here because its concepts/methods are generic and relevant in the context of this paper.

Its server-side software is installed with fixed software packages that are wrapped into GenePattern modules.
The modules are used through a web interface, the modern implementation is GenePattern Notebook \citep{reich17}.
It is an extension of the Jupyter notebook (see Appendix \ref{appendix:editors}), which also has a special ``GenePattern'' cell that will connect to GenePattern servers for doing the analysis.
However, the wrapper modules just call an existing tool on the host system.
Given that each server may have its own set of installed software, the analysis may differ (or crash) when run on different GenePattern servers, hampering reproducibility.

The primary GenePattern server had been active since 2008 and had 40,000 registered users with 2000 to 5000 jobs running every week \citep{reich17}.
However, it was shut down on November 15th 2019 due to end of funding\footnote{\url{https://www.genepattern.org/blog/2019/10/01/the-genomespace-project-is-ending-on-november-15-2019}}.
All processing with this server has stopped, and any archived data on it has been deleted.
Since GenePattern is free software, there are alternative public servers to use, so hopefully work on it will continue.
However, funding is limited and those servers may face similar funding problems.
This is a very nice example of the fragility of solutions that depend on archiving and running high-level research products (including data, binary/compiled code).





\subsection{Kepler (2005)}
Kepler (\url{https://kepler-project.org}) is a Java-based Graphic User Interface workflow management tool \citep{ludascher05}.
Users drag-and-drop analysis components, called ``actors'', into a visual, directional graph, which is the workflow (similar to Figure \ref{fig:analysisworkflow}).
Each actor is connected to others through the Ptolemy approach \citep{eker03}.
In many aspects Kepler is like VisTrails, see Appendix \ref{appendix:vistrails}.
\tonote{Since kepler is older, it may be better to bring the VisTrails features here.}





\subsection{VisTrails (2005)}
\label{appendix:vistrails}

VisTrails (\url{https://www.vistrails.org}) was a graphical workflow managing system that is described in \citet{bavoil05}.
According to its webpage, VisTrails maintenance has been stopped since May 2016; its last Git commit, as of this writing, was in November 2017.
However, being well maintained for over 10 years is itself an achievement.

VisTrails (or ``visualization trails'') was initially designed for managing visualizations, but later grew into a generic workflow system with meta-data and provenance features.
Each analysis step, or module, is recorded in an XML schema, which defines the operations and their dependencies.
The XML attributes of each module can be used in any XML query language to find certain steps (for example those that used a certain command).
Since the main goal was visualization (as images), apparently its primary output is in the form of image spreadsheets.
Its design is based on a change-based provenance model using a custom VisTrails provenance query language (vtPQL), for more see \citet{scheidegger08}.
Since XML is a plain-text format, as the user inspects the data and makes changes to the analysis, the changes are recorded as ``trails'' in the project's VisTrails repository, which operates very much like common version control systems (see Appendix \ref{appendix:versioncontrol}).

With respect to keeping the history/provenance of the final dataset, VisTrails is very much like the template introduced in this paper.
However, even though XML is in plain text, it is very hard to edit manually.
VisTrails therefore provides a graphic user interface with a visual representation of the project's inter-dependent steps (similar to Figure \ref{fig:analysisworkflow}).
Besides the fact that it is no longer maintained, the conceptual differences with the proposed template are substantial.
The most important is that VisTrails doesn't control the software that is run; it only controls the sequence of steps in which the software is run.
This template also defines dependencies and operations based on the very standard and commonly known Make system, not a custom XML format.
Scripts can easily be written to generate an XML-formatted output from Makefiles.





\subsection{Galaxy (2010)}
\label{appendix:galaxy}

Galaxy (\url{https://galaxyproject.org}) is a web-based Genomics workbench \citep{goecks10}.
The main user interface is ``Galaxy Pages'', which doesn't require any programming: users simply use abstract ``tools'', which are wrappers over command-line programs.
Therefore the actual running version of the program can be hard to control across different Galaxy servers \tonote{confirm this}.
Besides the automatically generated metadata of a project (which include version control, or its history), users can also tag/annotate each analysis step, describing its intent/purpose.
Besides some small differences, this seems to be very similar to GenePattern (Appendix \ref{appendix:genepattern}).





\subsection{Image Processing On Line journal, IPOL (2010)}
The IPOL journal (\url{https://www.ipol.im}) attempts to publish the full implementation details of a proposed image processing algorithm as a scientific paper \citep[first published article in July 2010]{limare11}.
An IPOL paper is a traditional research paper, but with a focus on implementation.
The published narrative description of the algorithm must be detailed to a level that any specialist can implement it in their own programming language (extremely detailed).
The author's own implementation of the algorithm is also published with the paper (in C, C++ or MATLAB); the code must be well commented, linking each part of it with the relevant part of the paper.
The authors must also submit several example datasets/scenarios.
The referee actually inspects the code and narrative, confirming that they match with each other, and with the stated conclusions of the published paper.
After publication, each paper also has a ``demo'' button on its webpage, allowing readers to try the algorithm on a web-interface and even provide their own input.

The IPOL model is indeed the single most robust model of peer review and publishing computational research methods/implementations.
It has grown steadily over the last 10 years, publishing 23 research articles in 2019 alone.
We encourage the reader to visit its webpage and see some of its recent papers and their demos.
It can be so thorough and complete because it has a very narrow scope (image processing), and the published algorithms are highly atomic, not needing significant dependencies (beyond input/output), allowing the referees to go deep into each implemented algorithm.
In fact, high-level languages like Perl, Python or Java are not acceptable precisely because of the additional complexities/dependencies that they require.

Ideally (if any referee/reader was inclined to do so), the proposed template of this paper allows for a similar level of scrutiny, but for much more complex research scenarios, involving hundreds of dependencies and complex processing on the data.



\subsection{WINGS (2010)}
\label{appendix:wings}

WINGS (\url{https://wings-workflows.org}) is an automatic workflow generation algorithm \citep{gil10}.
It runs on a centralized web server, requiring many dependencies (such that it is recommended to download Docker images).
It allows users to define various workflow components (for example datasets and analysis components) with high-level goals.
It then uses selection and rejection algorithms to find the best components using a pool of analysis components that can satisfy the requested high-level constraints.
\tonote{Read more about this}





\subsection{Active Papers (2011)}
\label{appendix:activepapers}
Active Papers (\url{http://www.activepapers.org}) attempts to package the code and data of a project into one file (in HDF5 format).
It was initially written in Java because its compiled bytecode can be run on any machine with a Java virtual machine (JVM) \citep[see][]{hinsen11}.
However, Java is not a commonly used platform today, hence it was later implemented in Python \citep{hinsen15}.

In the Python version, all processing steps and input data (or references to them) are stored in an HDF5 file.
However, it can only account for pure-Python packages using the host operating system's Python modules \tonote{confirm this!}.
When the Python module contains a component written in other languages (mostly C or C++), it needs to be an external dependency to the Active Paper.

As mentioned in \citet{hinsen15}, the fact that it relies on HDF5 is a caveat of Active Papers, because many tools are necessary to access it.
Downloading the pre-built HDF View binaries (provided by the HDF group) is not possible anonymously/automatically (login is required).
Installing it using the Debian or Arch Linux package managers also failed due to dependencies.
Furthermore, as a high-level data format HDF5 evolves very fast, for example HDF5 1.12.0 (February 29th, 2020) is not usable with older libraries provided by the HDF5 team. % maybe replace with: February 29\textsuperscript{th}, 2020?

While data and code are indeed fundamentally similar concepts technically \tonote{cite Konrad's paper on this}, they are used by humans differently.
This becomes a burden when large datasets are used, as was also acknowledged in \citet{hinsen15}.
If the data are proprietary (for example medical patient data), the data must not be released, but the methods that produced them can be.
Furthermore, since all reading and writing is done in the HDF5 file, the file can easily bloat to very large sizes due to temporary/reproducible files; it is then necessary to remove/dummify them, thus complicating the code and making it hard to read.
For example the Active Papers HDF5 file of \citet[in \href{https://doi.org/10.5281/zenodo.2549987}{zenodo.2549987}]{kneller19} is 1.8 giga-bytes.

In many scenarios, peers just want to inspect the processing by reading the code and checking a very specific part of it (one or two lines), not necessarily needing to run it or obtain the datasets.
Hence the extra volume for data, and an obscure HDF5 format that needs special tools just to read plain-text code, is a major burden.





\subsection{Collage Authoring Environment (2011)}
\label{appendix:collage}
The Collage Authoring Environment \citep{nowakowski11} was the winner of Elsevier Executable Paper Grand Challenge \citep{gabriel11}.
It is based on the GridSpace2\footnote{\url{http://dice.cyfronet.pl}} distributed computing environment\tonote{find citation}, which has a web-based graphic user interface.
Through its web-based interface, viewers of a paper can actively experiment with the parameters of a published paper's displayed outputs (for example figures).
\tonote{See how it containerizes the software environment}





\subsection{SHARE (2011)}
\label{appendix:SHARE}
SHARE (\url{https://is.ieis.tue.nl/staff/pvgorp/share}) is a web portal that hosts virtual machines (VMs) for storing the environment of a research project, for more, see \citet{vangorp11}.
The top project webpage above is still active; however, the virtual machines and the SHARE system have been removed since 2019.

SHARE was awarded second prize in the Elsevier Executable Paper Grand Challenge \citep{gabriel11}.
Simply put, SHARE is just a VM that users can download and run.
The limitations of VMs for reproducibility were discussed in Appendix \ref{appendix:virtualmachines}, and the SHARE system does not specify any requirements on making the VM itself reproducible.





\subsection{Verifiable Computational Result, VCR (2011)}
\label{appendix:verifiableidentifier}
A ``verifiable computational result'' (\url{http://vcr.stanford.edu}) is an output (table, figure, etc.) that is associated with a ``verifiable result identifier'' (VRI), see \citet{gavish11}.
It was awarded the third prize in the Elsevier Executable Paper Grand Challenge \citep{gabriel11}.

A VRI is created using tags within the programming source that produced that output, also recording its version control or history.
This enables exact identification and citation of results.
The VRIs are automatically generated web-URLs that link to public VCR repositories containing the data, inputs and scripts, that may be re-executed.
According to \citet{gavish11}, the VRI generation routine has been implemented in MATLAB, R and Python, although only the MATLAB version was available during the writing of this paper.
VCR also has special \LaTeX{} macros for loading the respective VRI into the generated PDF.

Unfortunately most parts of the webpage are not complete at the time of this writing.
The VCR webpage contains an example PDF\footnote{\url{http://vcr.stanford.edu/paper.pdf}} that is generated with this system, however, the linked VCR repository (\inlinecode{http://vcr-stat.stanford.edu}) does not exist at the time of this writing.
Finally, the dates of the files in the MATLAB extension tarball are set to 2011, hinting that VCR was probably abandoned soon after the publication of \citet{gavish11}.





\subsection{SOLE (2012)}
\label{appendix:sole}
SOLE (Science Object Linking and Embedding) defines ``science objects'' (SOs) that can be manually linked with phrases of the published paper \citep[for more, see ][]{pham12,malik13}.
An SO is any code/content that is wrapped in begin/end tags with an associated type and name.
For example special commented lines in a Python, R or C program.
The SOLE command-line program parses the tagged file, generating metadata elements unique to the SO (including its URI).
SOLE also supports workflows as Galaxy tools \citep{goecks10}.

For reproducibility, \citet{pham12} suggest building a SOLE-based project in a virtual machine, using any custom package manager that is hosted on a private server to obtain a usable URI.
However, as described in Appendices \ref{appendix:independentenvironment} and \ref{appendix:packagemanagement}, unless virtual machines are built with robust package managers, this is not a sustainable solution (the virtual machine itself is not reproducible).
Also, hosting a large virtual machine server with fixed IP on a hosting service like Amazon (as suggested there) will be very expensive.
The manual/artificial definition of tags to connect parts of the paper with the analysis scripts is also a caveat due to human error and incompleteness (tags the authors may not consider important, but may be useful later).
The solution of the proposed template (where anything coming out of the analysis is directly linked to the paper's contents with \LaTeX{} elements) avoids these problems.





\subsection{Sumatra (2012)}
Sumatra (\url{http://neuralensemble.org/sumatra}) attempts to capture the environment information of a running project \citep{davison12}.
It is written in Python and is a command-line wrapper over the analysis script; by controlling its execution, it is able to capture the environment it was run in.
The captured environment can be viewed in plain text or through a web interface.
Sumatra also provides \LaTeX/Sphinx features, which will link the paper with the project's Sumatra database.
This enables researchers to use a fixed version of a project's figures in the paper, even at later times (while the project is being developed).

The actual code that Sumatra wraps around must itself be under version control, and it doesn't run if there are uncommitted changes (although it is not clear what happens if a commit is amended).
Since information on the environment has been captured, Sumatra is able to identify if it has changed since a previous run of the project.
Therefore Sumatra makes no attempt at storing the environment of the analysis itself (as Sciunit does, see Appendix \ref{appendix:sciunit}), only information about it.
Sumatra thus needs to know the language of the running program.





\subsection{Research Object (2013)}
\label{appendix:researchobject}

The Research Object (\url{http://www.researchobject.org}) is a collection of meta-data ontologies to describe aggregations of resources or workflows; see \citet{bechhofer13} and \citet{belhajjame15}.
It thus provides resources to link various workflow/analysis components (see Appendix \ref{appendix:existingtools}) into a final workflow.

\citet{bechhofer13} describes how a workflow in Apache Taverna (Appendix \ref{appendix:taverna}) can be translated into research objects.
The important thing is that the research object concept is not specific to any special workflow; it is just a metadata bundle, which is only as robust in reproducing the result as the running workflow.
For example, Apache Taverna cannot guarantee exact reproducibility as described in Appendix \ref{appendix:taverna}.
But if a translator were written to convert the proposed template into research objects, they could provide this guarantee.





\subsection{Sciunit (2015)}
\label{appendix:sciunit}
Sciunit (\url{https://sciunit.run}) defines ``sciunit''s that keep the executed commands for an analysis and all the necessary programs and libraries that are used in those commands.
It automatically parses all the executables in the script, and copies them, and their dependency libraries (down to the C library), into the sciunit.
Because the sciunit contains all the programs and necessary libraries, it is possible to run it readily on other systems that have a similar CPU architecture.
For more, please see \citet{meng15}.

In our tests, Sciunit installed successfully; however, we couldn't run it because of a dependency problem with the \inlinecode{tempfile} package (in the standard Python library).
Sciunit is written in Python 2 (which reached its end-of-life on January 1st, 2020) and its last Git commit in its main branch is from June 2018 (more than 1.5 years ago).
Recent activity in a \inlinecode{python3} branch shows that others are attempting to translate the code into Python 3 (the main author has graduated and is apparently no longer working on Sciunit).

Because we weren't able to run it, the following discussion will just be theoretical.
The main issue with Sciunit's approach is that the copied binaries are just black boxes.
Therefore, it is not possible to see how the used binaries from the initial system were built, or whether they have security problems.
This is a major problem for scientific projects, in principle (not knowing how the programs were built) and in practice (archiving a large-volume sciunit for every step of an analysis requires a lot of space).





\subsection{Binder (2017)}
Binder (\url{https://mybinder.org}) is a tool to containerize already existing Jupyter based processing steps.
Users simply add a set of Binder-recognized configuration files to their repository.
Binder will build a Docker image and install all the dependencies inside of it with Conda (the list of necessary packages is taken from the configuration files).
One good feature of Binder is that the imported Docker image must be tagged (something like a checksum).
This will ensure that future/latest updates of the imported Docker image are not mistakenly used.
However, it does not ensure that the Dockerfile used by the imported Docker image follows a similar convention also.
Binder is used by \citet{jones19}.





\subsection{Gigantum (2017)}
Gigantum (\url{https://gigantum.com}) is a client/server system, in which the client is a web-based (graphical) interface that is installed as ``Gigantum Desktop'' within a Docker image and is free software (MIT License).
\tonote{I couldn't find the license to the server software yet, but it says that 20GB is provided for ``free'', so it is a little confusing if anyone can actually run the server.}
\tonote{I took the date from their PyPI page, where the first version 0.1 was published in November 2016.}

Gigantum uses Docker containers for an independent environment, Conda (or Pip) to install packages, Jupyter notebooks to edit and run code, and Git to store its history.
Simply put, it is a high-level wrapper for combining these components.
Internally, a Gigantum project is organized as files in a directory that can be opened even without Gigantum's own client.
The file structure (which is under version control) includes the code, input data and output data.
As acknowledged on their own webpage, this greatly reduces the speed of Git operations and of transmitting or archiving the project.
Therefore, limits are placed on the dataset/code sizes.
However, there is one directory which can be used to store files that must not be tracked.
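
Based on this description, a Gigantum project directory may look roughly like the sketch below; the directory names are purely hypothetical (we have not inspected the exact layout), the point is only that code, inputs and outputs are kept under Git, with one directory exempt from tracking.

\begin{verbatim}
my-project/
  .git/          # Full Git history of the project.
  code/          # Notebooks and scripts (tracked).
  input/         # Input datasets (tracked, hence the size limits).
  output/        # Generated results (tracked).
  untracked/     # Large or temporary files, kept out of Git.
\end{verbatim}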





\subsection{Popper (2017)}
\label{appendix:popper}
Popper (\url{https://falsifiable.us}) is a software implementation of the Popper Convention \citep{jimenez17}.
The Convention is a set of very generic conditions that are also applicable to the template proposed in this paper.
For a discussion of the convention itself, please see Section \ref{sec:principles}; in this section we review their software implementation.

The Popper team's own solution is through a command-line program called \inlinecode{popper}.
The \inlinecode{popper} program itself is written in Python, but job management is with the HashiCorp configuration language (HCL).
HCL is primarily aimed at running jobs on HashiCorp's ``infrastructure as a service'' (IaaS) products.
Until September 30th, 2019\footnote{\url{https://github.blog/changelog/2019-09-17-github-actions-will-stop-running-workflows-written-in-hcl}}, HCL was used by ``GitHub Actions'' to manage workflows. % maybe use the \textsuperscript{th} with dates?
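
For context, the sketch below shows the general shape of such an HCL workflow (as used by Popper and the early GitHub Actions); the block and attribute names follow our reading of their documentation, and the container image and file name are illustrative.

\begin{verbatim}
workflow "example analysis" {
  on = "push"
  resolves = ["run analysis"]
}

action "run analysis" {
  uses = "docker://python:3.7"
  args = ["python", "analysis.py"]
}
\end{verbatim}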

To start a project, the \inlinecode{popper} command-line program builds a template, or ``scaffold'', which is a minimal set of files that can be run.
The scaffold is very similar to the raw template that is proposed in this paper.
However, as of this writing, the scaffold isn't complete.
It lacks a manuscript and validation of outputs (as mentioned in the convention).
By default Popper runs in a Docker image (so root permissions are necessary), but Singularity is also supported.
See Appendix \ref{appendix:independentenvironment} for more on containers, and Appendix \ref{appendix:highlevelinworkflow} for using high-level languages in the workflow.





\subsection{Whole Tale (2019)}
\label{appendix:wholetale}

Whole Tale (\url{https://wholetale.org}) is a web-based platform for managing a project and organizing data provenance, see \citet{brinckman19}.
It uses online editors like Jupyter or RStudio (see Appendix \ref{appendix:editors}) that are encapsulated in a Docker container (see Appendix \ref{appendix:independentenvironment}).

The web-based nature of Whole Tale's approach and its dependency on many tools (which have many dependencies themselves) are major limitations for future reproducibility.
For example, when following their own tutorial on ``Creating a new tale'', the provided Jupyter notebook could not be executed because of a dependency problem.
This has been reported to the authors as issue 113\footnote{\url{https://github.com/whole-tale/wt-design-docs/issues/113}}, but as all the second-order dependencies evolve, it is not hard to envisage such dependency incompatibilities being the primary issue for older projects on Whole Tale.
Furthermore, the fact that a Tale is stored as a binary Docker container causes two important problems: 1) it requires a very large storage capacity for every project that is hosted there, making it very expensive to scale if demand expands; 2) it is not possible to accurately see how the environment was built (when the Dockerfile uses \inlinecode{apt}); for more on this, please see Appendix \ref{appendix:packagemanagement}.
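
For example, a Dockerfile like the (illustrative) one below records only the package names, not the exact versions that were actually installed at build time, so the environment cannot be rebuilt exactly later.

\begin{verbatim}
FROM ubuntu:18.04
# The versions installed depend on the state of the Ubuntu
# repositories at build time, and are not recorded anywhere:
RUN apt-get update && apt-get install -y python3 python3-numpy
\end{verbatim}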





\subsection{Things to add}
\url{https://sites.nationalacademies.org/cs/groups/pgasite/documents/webpage/pga_180684.pdf} classifies tools as follows:
    \begin{itemize}
    \item Research environments: \href{http://vcr.stanford.edu}{Verifiable computational research} (discussed above), \href{http://www.sciencedirect.com/science/article/pii/S1877050911001207}{SHARE} (a Virtual Machine), \href{http://www.codeocean.com}{Code Ocean} (discussed above), \href{http://jupyter.org}{Jupyter} (discussed above), \href{https://yihui.name/knitr}{knitR} (based on Sweave, dynamic report generation with R), \href{https://cran.r-project.org}{Sweave} (Function in R, for putting R code within \LaTeX), \href{http://www.cyverse.org}{Cyverse} (proprietary web tool with servers for bioinformatics), \href{https://nanohub.org}{NanoHUB} (collection of Simulation Programs for nanoscale phenomena that run in the cloud), \href{https://www.elsevier.com/about/press-releases/research-and-journals/special-issue-computers-and-graphics-incorporates-executable-paper-grand-challenge-winner-collage-authoring-environment}{Collage Authoring Environment} (discussed above), \href{https://osf.io/ns2m3}{SOLE} (discussed above), \href{https://osf.io}{Open Science framework} (a hosting webpage), \href{https://www.vistrails.org}{VisTrails} (discussed above), \href{https://pypi.python.org/pypi/Sumatra}{Sumatra} (discussed above), \href{http://software.broadinstitute.org/cancer/software/genepattern}{GenePattern} (reviewed above), Image Processing On Line (\href{http://www.ipol.im}{IPOL}) journal (publishes full analysis scripts, but doesn't deal with dependencies), \href{https://github.com/systemslab/popper}{Popper} (reviewed above), \href{https://galaxyproject.org}{Galaxy} (reviewed above), \href{http://torch.ch}{Torch.ch} (finished project for neural networks on images), \href{http://wholetale.org/}{Whole Tale} (discussed above).
    \item Workflow systems: \href{http://www.taverna.org.uk}{Taverna}, \href{http://www.wings-workflows.org}{Wings}, \href{https://pegasus.isi.edu}{Pegasus}, \href{http://www.pgbovine.net/cde.html}{CDE}, \href{http://binder.org}{Binder}, \href{http://wiki.datakurator.org/wiki}{Kurator}, \href{https://kepler-project.org}{Kepler}, \href{https://github.com/everware}{Everware}, \href{http://cds.nyu.edu/projects/reprozip}{Reprozip}.
    \item Dissemination platforms: \href{http://researchcompendia.org}{ResearchCompendia}, \href{https://datacenterhub.org/about}{DataCenterHub}, \href{http://runmycode.org}{RunMyCode}, \href{https://www.chameleoncloud.org}{ChameleonCloud}, \href{https://occam.cs.pitt.edu}{Occam}, \href{http://rcloud.social/index.html}{RCloud}, \href{http://thedatahub.org}{TheDataHub}, \href{http://www.ahay.org/wiki/Package_overview}{Madagascar}.
    \end{itemize}




















\newpage
\section{Things remaining to add}
\begin{itemize}
\item Special volume on ``Reproducible research'' in Computing in Science \& Engineering \citep{fomel09}.
\item ``I’ve learned that interactive programs are slavery (unless they include the ability to arrive in any previous state by means of a script).'' \citep{fomel09}.
\item \citet{fomel09} discuss the ``versioning problem'': on different systems, programs have different versions.
\item \citet{fomel09}: a C program written 20 years ago was still usable.
\item \citet{fomel09}: ``in an attempt to increase the size of the community, Matthias Schwab and I submitted a paper to Computers in Physics, one of CiSE’s forerunners. It was rejected. The editors said if everyone used Microsoft computers, everything would be easily reproducible. They also predicted the imminent demise of Fortran''.
\item \citet{alliez19}: Software citation, with a nice dependency plot for matplotlib.
  \item SC \href{https://sc19.supercomputing.org/submit/reproducibility-initiative}{Reproducibility Initiative} for mandatory Artifact Description (AD).
  \item \href{https://www.acm.org/publications/policies/artifact-review-badging}{Artifact review badging} by the Association for Computing Machinery (ACM).
  \item eLife journal \href{https://elifesciences.org/labs/b521cf4d/reproducible-document-stack-towards-a-scalable-solution-for-reproducible-articles}{announcement} on reproducible papers. \citet{lewis18} is their first reproducible paper.
  \item The \href{https://www.scientificpaperofthefuture.org}{Scientific paper of the future initiative} encourages geoscientists to include associated metadata with scientific papers \citep{gil16}.
  \item Digital objects: \url{http://doi.org/10.23728/b2share.b605d85809ca45679b110719b6c6cb11} and \url{http://doi.org/10.23728/b2share.4e8ac36c0dd343da81fd9e83e72805a0}
  \item \citet{mesirov10}, \citet{casadevall10}, \citet{peng11}: Importance of reproducible research.
  \item \citet{sandve13} is an editorial recommendation to publish reproducible results.
  \item \citet{easterbrook14} Free/open software for open science.
  \item \citet{peng15}: Importance of better statistical education.
  \item \citet{topalidou16}: Failed attempt to reproduce a result.
  \item \citet{hutton16} on reproducibility in hydrology, criticized in \citet{melson17}.
  \item \citet{fomel09}: Editorial on reproducible research.
  \item \citet{munafo17}: Reproducibility in social sciences.
  \item \citet{stodden18}: Effectiveness of journal policy on computational reproducibility.
  \item \citet{fanelli18} is critical of the narrative that there is a ``reproducibility crisis'', and argues that it is important to empower scientists.
  \item \citet{burrell18} open software (in particular Python) in heliophysics.
  \item \citet{allen18} show that many papers don't cite software.
  \item \citet{zhang18} explicitly say that they won't release their code: ``We opt not to make the code used for the chemical evolution modeling publicly available because it is an important asset of the researchers' toolkits''.
  \item \citet{jones19} make a genuine effort at reproducing every number in the paper (using Docker, Conda, CGAT-core and Binder), but they can ultimately only release scripts. They claim that a higher level of reproducibility is not possible, but here we show that it is.
  \item LSST uses Kubernetes and Docker for reproducibility \citep{banek19}.
  \item Interesting survey/paper on the importance of coding in science \citep{merali10}.
  \item Discuss the Provenance challenge \citep{moreau08}, showing the importance of meta data and provenance tracking.
    It is especially noteworthy that it was organized by medical scientists.
    Its webpage (for the latest challenge) has a nice intro: \url{https://www.cccinnovationcenter.com/challenges/provenance-challenge}.
  \item In discussion: The XML provenance system is very interesting, scripts can be written to parse the Makefiles within this template to generate such XML outputs for easy standard metadata parsing.
    The XML that contains a log of the outputs is also interesting.
  \item \citet{becker17} discuss reproducibility methods in R.
  \item Elsevier Executable Paper Grand Challenge\footnote{\url{https://shar.es/a3dgl2}} \citep{gabriel11}.
  \item \citet{menke20} show how software identifiability has seen the best improvement, so there is hope!
  \item Nature's collection on papers about reproducibility: \url{https://www.nature.com/collections/prbfkwmwvz}.
  \item Nice links for applying FAIR principles in research software: \url{https://www.rd-alliance.org/group/software-source-code-ig/wiki/fair4software-reading-materials}
  \item Jupyter Notebooks and problems with reproducibility: \citet{rule18} and \citet{pimentel19}.
  \item Reproducibility certification \url{https://www.cascad.tech}.
  \item \url{https://plato.stanford.edu/entries/scientific-reproducibility}.
  \item
Modern analysis tools are almost entirely implemented as software packages.
This has led many scientists to adopt solutions that software developers use for reproducing software (for example to fix bugs, or avoid security issues).
These tools and how they are used are thoroughly reviewed in Appendices \ref{appendix:existingtools} and \ref{appendix:existingsolutions}.
However, the problem of reproducibility in the sciences is more complicated and subtle than that of software engineering.
This difference can be broken up into the following categories, which are described more fully below:
1) Reading vs. executing, 2) Archiving how software is used and 3) Citation of the software/methods used for scientific credit.

The first difference is because in the sciences, reproducibility is not merely a problem of re-running a research project (where a binary blob like a container or virtual machine is sufficient).
For a scientist it is more important to read/study a method of a paper that is 1, 10, or 100 years old.
The hardware to execute the code may have become obsolete, or it may require too much processing power, storage, or time for another random scientist to execute.
Another scientist just needs to be assured that the commands they are reading are exactly what was (and can potentially be) executed.

On the second point, scientists are devoting a smaller fraction of their papers to the technical aspects of the work because these are increasingly done by pre-written software programs and libraries.
Therefore, scientific papers are no longer a complete repository for preserving and archiving very important aspects of the scientific endeavor and hard gained experience.
Attempts such as Software Heritage\footnote{\url{https://www.softwareheritage.org}} \citep{dicosmo18} do a wonderful job at long term preservation and archival of the software source code.
However, preservation of the software's raw code is only part of the process; it is also critically important to preserve how the software was used: with what configuration or run-time options, for what kinds of problems, and in conjunction with which other software tools.

The third major difference is scientific credit, which is measured in units of citations, not dollars.
As described above, scientific software are playing an increasingly important role in modern science.
Because of the domain-specific knowledge necessary to produce such software, they are mostly written by scientists for scientists.
Therefore a significant amount of effort and research funding has gone into producing scientific software.
At least for the software that do have an accompanying paper, it is thus important that those papers be cited when the software is used.
\end{itemize}



%% Mention all used software in an appendix.
\section{Software acknowledgement}
\label{appendix:softwareacknowledge}
\input{tex/build/macros/dependencies.tex}

%% Finish LaTeX
\end{document}

%% This file is part of the reproducible paper template
%%   https://gitlab.com/makhlaghi/reproducible-paper
%
%% This template is free software: you can redistribute it and/or modify it
%% under the terms of the GNU General Public License as published by the
%% Free Software Foundation, either version 3 of the License, or (at your
%% option) any later version.
%
%% This template is distributed in the hope that it will be useful, but
%% WITHOUT ANY WARRANTY; without even the implied warranty of
%% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
%% General Public License for more details.
%
%% You should have received a copy of the GNU General Public License along
%% with Template.  If not, see <https://www.gnu.org/licenses/>.