@book{26HadoopMapreduce,
  title = {26 {{Hadoop MapReduce}}}
}
@article{ambroseLessonsAvalancheNumbers,
title = {Lessons from the {{Avalanche}} of {{Numbers}}: {{Big Data}} in {{Historical Perspective}}},
author = {Ambrose, Meg Leta},
volume = {11},
pages = {78},
abstract = {The big data revolution, like many changes associated with technological advancement, is often compared to the industrial revolution to create a frame of reference for its transformative power, or portrayed as altogether new. This article argues that between the industrial revolution and the digital revolution is a more valuable, yet overlooked period: the probabilistic revolution that began with the avalanche of printed numbers between 1820 and 1840. By comparing the many similarities between big data today and the avalanche of numbers in the 1800s, the article situates big data in the early stages of a prolonged transition to a potentially transformative epistemic revolution, like the probabilistic revolution. The widespread changes in and characteristics of a society flooded by data results in a transitional state that creates unique challenges for policy efforts by disrupting foundational principles relied upon for data protection. The potential of a widespread, lengthy transition also places the law in a pivotal position to shape and guide big data-based inquiry through to whatever epistemic shift may lie ahead.},
language = {English}
}
@article{bentebibelInductionICOSCXCR3,
title = {Induction of {{ICOS}}+{{CXCR3}}+{{CXCR5}}+ {{TH Cells Correlates}} with {{Antibody Responses}} to {{Influenza Vaccination}}},
author = {Bentebibel, Salah-Eddine and Lopez, Santiago and Obermoser, Gerlinde and Schmitt, Nathalie and Mueller, Cynthia and Harrod, Carson and Flano, Emilio and Mejias, Asuncion and Albrecht, Randy A and Blankenship, Derek and Xu, Hui and Pascual, Virginia and Banchereau, Jacques and {Garcia-Sastre}, Adolfo and Palucka, Anna Karolina and Ramilo, Octavio and Ueno, Hideki},
pages = {11},
language = {English}
}
@book{bishopPatternRecognitionMachine2006,
title = {Pattern {{Recognition}} and {{Machine Learning}}},
author = {Bishop, Christopher M.},
year = {2006},
publisher = {{Springer}},
address = {{New York}},
isbn = {978-0-387-31073-2},
keywords = {Machine learning,Pattern perception},
language = {English},
lccn = {Q327 .B52 2006},
series = {Information {{Science}} and {{Statistics}}}
}
@article{chapmanStepbyStepDataMining,
title = {Step-by-{{Step Data Mining Guide}}},
author = {Chapman, Pete and Clinton, Julian and Kerber, Randy and Khabaza, Thomas and Reinartz, Thomas and Shearer, Colin and Wirth, R{\"u}diger},
pages = {76},
language = {English}
}
@article{chapmanStepbystepDataMining,
title = {Step-by-Step Data Mining Guide},
author = {Chapman, Pete and Clinton, Julian and Kerber, Randy and Khabaza, Thomas and Reinartz, Thomas and Shearer, Colin and Wirth, R{\"u}diger},
pages = {76},
language = {English}
}
@article{chattopadhyaySinglecellTechnologiesMonitoring2014,
title = {Single-Cell Technologies for Monitoring Immune Systems},
author = {Chattopadhyay, Pratip K. and Gierahn, Todd M. and Roederer, Mario and Love, J. Christopher},
year = {2014},
month = feb,
volume = {15},
pages = {128--135},
publisher = {{Nature Publishing Group}},
issn = {1529-2916},
doi = {10.1038/ni.2796},
abstract = {Love and colleagues review the limitations of bulk measurements for monitoring the immune system and explore advances in single-cell technologies that overcome these problems.},
journal = {Nature Immunology},
language = {English},
number = {2}
}
@article{clarkHandbookComputationalLinguistics,
title = {The {{Handbook}} of {{Computational Linguistics}} and {{Natural Language Processing}}},
author = {Clark, Alexander and Fox, Chris and Lappin, Shalom},
pages = {801},
language = {English}
}
@book{CommonProblemsGenerative,
title = {Common {{Problems}} | {{Generative Adversarial Networks}}},
language = {English}
}
@book{DataAnalysisVisualization,
title = {Data {{Analysis}} and {{Visualization}}: {{Supervised Learning}} - {{Classification}} (1/2)},
language = {English}
}
@article{DataScienceSociety,
title = {Data {{Science}} and {{Society}} - {{Supervised Learning}}},
pages = {32},
language = {English}
}
@article{DataScienceSocietya,
title = {Data {{Science}} and {{Society}} - {{Descriptive Statistics}}},
pages = {27},
language = {English}
}
@article{deanMapReduceSimplifiedData2008,
title = {{{MapReduce}}: {{Simplified Data Processing}} on {{Large Clusters}}},
shorttitle = {{{MapReduce}}},
author = {Dean, Jeffrey and Ghemawat, Sanjay},
year = {2008},
month = jan,
volume = {51},
pages = {107--113},
issn = {0001-0782, 1557-7317},
doi = {10.1145/1327452.1327492},
abstract = {MapReduce is a programming model and an associated implementation for processing and generating large data sets. Users specify a map function that processes a key/value pair to generate a set of intermediate key/value pairs, and a reduce function that merges all intermediate values associated with the same intermediate key. Many real world tasks are expressible in this model, as shown in the paper.},
journal = {Communications of the ACM},
language = {English},
number = {1}
}
@article{dejongHaemagglutinationinhibitingAntibodyInfluenza2003,
title = {Haemagglutination-Inhibiting Antibody to Influenza Virus},
author = {{de Jong}, J. C. and Palache, A. M. and Beyer, W. E. P. and Rimmelzwaan, G. F. and Boon, A. C. M. and Osterhaus, A. D. M. E.},
year = {2003},
volume = {115},
pages = {63--73},
issn = {1424-6074},
abstract = {The results of the haemagglutination-inhibiting (HI) antibody test for influenza virus antibody in human sera closely match those produced by virus neutralization assays and are predictive of protection. On the basis of the data derived from 12 publications concerning healthy adults, we estimated the median HI titre protecting 50\% of the vaccinees against the virus concerned at 28. This finding supports the current policy requiring vaccines to induce serum HI titres of {$>$} or = 40 to the vaccine viruses in the majority of the vaccinees. Unfortunately similar studies are scanty for the elderly, the group most at risk of influenza. There still remain many unsolved technical problems with the HI assay and we recommend that these problems be studied and the virus neutralization test as a predictor of resistance to influenza be assessed. Although the studies on this issue often give conflicting results, they generally show that HI antibody responses to influenza vaccination tend to diminish with increasing age, when health is often compromized. Advanced age in itself seems not to be an independent factor in this process. However, even in completely healthy elderly individuals the response to vaccination with an antigenically new virus may be strongly reduced compared with younger vaccinees.},
journal = {Developments in Biologicals},
keywords = {Antibodies,Hemagglutinin Glycoproteins,Humans,Influenza A virus,Influenza Vaccines,Influenza Virus,Neutralization Tests,Viral},
language = {English},
pmid = {15088777}
}
@article{dosovitskiyGeneratingImagesPerceptual2016,
title = {Generating {{Images}} with {{Perceptual Similarity Metrics}} Based on {{Deep Networks}}},
author = {Dosovitskiy, Alexey and Brox, Thomas},
year = {2016},
month = feb,
abstract = {Image-generating machine learning models are typically trained with loss functions based on distance in the image space. This often leads to over-smoothed results. We propose a class of loss functions, which we call deep perceptual similarity metrics (DeePSiM), that mitigate this problem. Instead of computing distances in the image space, we compute distances between image features extracted by deep neural networks. This metric better reflects perceptually similarity of images and thus leads to better results. We show three applications: autoencoder training, a modification of a variational autoencoder, and inversion of deep convolutional networks. In all cases, the generated images look sharp and resemble natural images.},
archiveprefix = {arXiv},
eprint = {1602.02644},
eprinttype = {arxiv},
journal = {arXiv:1602.02644 [cs]},
keywords = {Computer Science - Computer Vision and Pattern Recognition,Computer Science - Machine Learning,Computer Science - Neural and Evolutionary Computing},
language = {English},
primaryclass = {cs}
}
@article{dosovitskiyGeneratingImagesPerceptual2016a,
title = {Generating {{Images}} with {{Perceptual Similarity Metrics}} Based on {{Deep Networks}}},
author = {Dosovitskiy, Alexey and Brox, Thomas},
year = {2016},
month = feb,
abstract = {Image-generating machine learning models are typically trained with loss functions based on distance in the image space. This often leads to over-smoothed results. We propose a class of loss functions, which we call deep perceptual similarity metrics (DeePSiM), that mitigate this problem. Instead of computing distances in the image space, we compute distances between image features extracted by deep neural networks. This metric better reflects perceptually similarity of images and thus leads to better results. We show three applications: autoencoder training, a modification of a variational autoencoder, and inversion of deep convolutional networks. In all cases, the generated images look sharp and resemble natural images.},
archiveprefix = {arXiv},
eprint = {1602.02644},
eprinttype = {arxiv},
journal = {arXiv:1602.02644 [cs]},
keywords = {Computer Science - Computer Vision and Pattern Recognition,Computer Science - Machine Learning,Computer Science - Neural and Evolutionary Computing},
language = {English},
primaryclass = {cs}
}
@article{dumoulinGuideConvolutionArithmetic2018,
title = {A Guide to Convolution Arithmetic for Deep Learning},
author = {Dumoulin, Vincent and Visin, Francesco},
year = {2018},
month = jan,
abstract = {We introduce a guide to help deep learning practitioners understand and manipulate convolutional neural network architectures. The guide clarifies the relationship between various properties (input shape, kernel shape, zero padding, strides and output shape) of convolutional, pooling and transposed convolutional layers, as well as the relationship between convolutional and transposed convolutional layers. Relationships are derived for various cases, and are illustrated in order to make them intuitive.},
archiveprefix = {arXiv},
eprint = {1603.07285},
eprinttype = {arxiv},
journal = {arXiv:1603.07285 [cs, stat]},
keywords = {Computer Science - Machine Learning,Computer Science - Neural and Evolutionary Computing,Statistics - Machine Learning},
language = {English},
primaryclass = {cs, stat}
}
@article{fdaGuidanceIndustryClinical2007,
title = {Guidance for {{Industry}}: {{Clinical Data Needed}} to {{Support}} the {{Licensure}} of {{Seasonal Inactivated Influenza Vaccines}}},
  author = {{FDA}},
year = {2007},
pages = {17},
language = {English}
}
@article{furmanApoptosisOtherImmune2013,
title = {Apoptosis and Other Immune Biomarkers Predict Influenza Vaccine Responsiveness},
author = {Furman, David and Jojic, Vladimir and Kidd, Brian and {Shen-Orr}, Shai and Price, Jordan and Jarrell, Justin and Tse, Tiffany and Huang, Huang and Lund, Peder and Maecker, Holden T and Utz, Paul J and Dekker, Cornelia L and Koller, Daphne and Davis, Mark M},
year = {2013},
month = jan,
volume = {9},
pages = {659},
publisher = {{John Wiley \& Sons, Ltd}},
issn = {1744-4292},
doi = {10.1038/msb.2013.15},
  abstract = {Despite the importance of the immune system in many diseases, there are currently no objective benchmarks of immunological health. In an effort to identifying such markers, we used influenza vaccination in 30 young (20--30 years) and 59 older subjects (60 to {$>$}89 years) as models for strong and weak immune responses, respectively, and assayed their serological responses to influenza strains as well as a wide variety of other parameters, including gene expression, antibodies to hemagglutinin peptides, serum cytokines, cell subset phenotypes and in vitro cytokine stimulation. Using machine learning, we identified nine variables that predict the antibody response with 84\% accuracy. Two of these variables are involved in apoptosis, which positively associated with the response to vaccination and was confirmed to be a contributor to vaccine responsiveness in mice. The identification of these biomarkers provides new insights into what immune features may be most important for immune health.},
journal = {Molecular Systems Biology},
keywords = {aging,apoptosis,influenza,systems immunology,vaccinology},
number = {1}
}
@article{galliEndOmicsHigh2019,
title = {The End of Omics? {{High}} Dimensional Single Cell Analysis in Precision Medicine},
shorttitle = {The End of Omics?},
author = {Galli, Edoardo and Friebel, Ekaterina and Ingelfinger, Florian and Unger, Susanne and N{\'u}{\~n}ez, Nicol{\'a}s Gonzalo and Becher, Burkhard},
year = {2019},
volume = {49},
pages = {212--220},
issn = {1521-4141},
doi = {10.1002/eji.201847758},
abstract = {High-dimensional single-cell (HDcyto) technologies, such as mass cytometry (CyTOF) and flow cytometry, are the key techniques that hold a great promise for deciphering complex biological processes. During the last decade, we witnessed an exponential increase of novel HDcyto technologies that are able to deliver an in-depth profiling in different settings, such as various autoimmune diseases and cancer. The concurrent advance of custom data-mining algorithms has provided a rich substrate for the development of novel tools in translational medicine research. HDcyto technologies have been successfully used to investigate cellular cues driving pathophysiological conditions, and to identify disease-specific signatures that may serve as diagnostic biomarkers or therapeutic targets. These technologies now also offer the possibility to describe a complete cellular environment, providing unanticipated insights into human biology. In this review, we present an update on the current cutting-edge HDcyto technologies and their applications, which are going to be fundamental in providing further insights into human immunology and pathophysiology of various diseases. Importantly, we further provide an overview of the main algorithms currently available for data mining, together with the conceptual workflow for high-dimensional cytometric data handling and analysis. Overall, this review aims to be a handy overview for immunologists on how to design, develop and read HDcyto data.},
copyright = {\textcopyright{} 2019 WILEY-VCH Verlag GmbH \& Co. KGaA, Weinheim},
journal = {European Journal of Immunology},
keywords = {bioinformatic tools,diagnostic biomarkers,flow cytometry,mass cytometry (CyTOF),single cell analysis},
language = {English},
number = {2}
}
@article{goodfellowGenerativeAdversarialNets,
title = {Generative {{Adversarial Nets}}},
author = {Goodfellow, Ian and {Pouget-Abadie}, Jean and Mirza, Mehdi and Xu, Bing and {Warde-Farley}, David and Ozair, Sherjil and Courville, Aaron and Bengio, Yoshua},
pages = {9},
language = {English}
}
@article{greenMortalityAttributableInfluenza2013,
title = {Mortality {{Attributable}} to {{Influenza}} in {{England}} and {{Wales Prior}} to, during and after the 2009 {{Pandemic}}},
author = {Green, Helen K. and Andrews, Nick and Fleming, Douglas and Zambon, Maria and Pebody, Richard},
year = {2013},
month = dec,
volume = {8},
pages = {e79360},
publisher = {{Public Library of Science}},
issn = {1932-6203},
doi = {10.1371/journal.pone.0079360},
abstract = {Very different influenza seasons have been observed from 2008/09\textendash 2011/12 in England and Wales, with the reported burden varying overall and by age group. The objective of this study was to estimate the impact of influenza on all-cause and cause-specific mortality during this period. Age-specific generalised linear regression models fitted with an identity link were developed, modelling weekly influenza activity through multiplying clinical influenza-like illness consultation rates with proportion of samples positive for influenza A or B. To adjust for confounding factors, a similar activity indicator was calculated for Respiratory Syncytial Virus. Extreme temperature and seasonal trend were controlled for. Following a severe influenza season in 2008/09 in 65+yr olds (estimated excess of 13,058 influenza A all-cause deaths), attributed all-cause mortality was not significant during the 2009 pandemic in this age group and comparatively low levels of influenza A mortality were seen in post-pandemic seasons. The age shift of the burden of seasonal influenza from the elderly to young adults during the pandemic continued into 2010/11; a comparatively larger impact was seen with the same circulating A(H1N1)pdm09 strain, with the burden of influenza A all-cause excess mortality in 15\textendash 64 yr olds the largest reported during 2008/09\textendash 2011/12 (436 deaths in 15\textendash 44 yr olds and 1,274 in 45\textendash 64 yr olds). On average, 76\% of seasonal influenza A all-age attributable deaths had a cardiovascular or respiratory cause recorded (average of 5,849 influenza A deaths per season), with nearly a quarter reported for other causes (average of 1,770 influenza A deaths per season), highlighting the importance of all-cause as well as cause-specific estimates. No significant influenza B attributable mortality was detected by season, cause or age group. This analysis forms part of the preparatory work to establish a routine mortality monitoring system ahead of introduction of the UK universal childhood seasonal influenza vaccination programme in 2013/14.},
journal = {PLOS ONE},
keywords = {Age groups,Death rates,England,Influenza,Influenza A virus,Pandemics,Pneumonia,Respiratory infections},
language = {English},
number = {12}
}
@article{hintonReducingDimensionalityData2006,
title = {Reducing the {{Dimensionality}} of {{Data}} with {{Neural Networks}}},
author = {Hinton, G. E.},
year = {2006},
month = jul,
volume = {313},
pages = {504--507},
issn = {0036-8075, 1095-9203},
doi = {10.1126/science.1127647},
journal = {Science},
language = {English},
number = {5786}
}
@article{hiraReviewFeatureSelection2015,
title = {A {{Review}} of {{Feature Selection}} and {{Feature Extraction Methods Applied}} on {{Microarray Data}}},
author = {Hira, Zena M. and Gillies, Duncan F.},
year = {2015},
month = jun,
volume = {2015},
pages = {1--13},
issn = {1687-8027, 1687-8035},
doi = {10.1155/2015/198363},
abstract = {We summarise various ways of performing dimensionality reduction on high-dimensional microarray data. Many different feature selection and feature extraction methods exist and they are being widely used. All these methods aim to remove redundant and irrelevant features so that classification of new instances will be more accurate. A popular source of data is microarrays, a biological platform for gathering gene expressions. Analysing microarrays can be difficult due to the size of the data they provide. In addition the complicated relations among the different genes make analysis more difficult and removing excess features can improve the quality of the results. We present some of the most popular methods for selecting significant features and provide a comparison between them. Their advantages and disadvantages are outlined in order to provide a clearer idea of when to use each one of them for saving computational time and resources.},
journal = {Advances in Bioinformatics},
language = {English}
}
@book{hutterAutomatedMachineLearning2019,
title = {Automated {{Machine Learning}}: {{Methods}}, {{Systems}}, {{Challenges}}},
shorttitle = {Automated {{Machine Learning}}},
editor = {Hutter, Frank and Kotthoff, Lars and Vanschoren, Joaquin},
year = {2019},
publisher = {{Springer International Publishing}},
address = {{Cham}},
doi = {10.1007/978-3-030-05318-5},
isbn = {978-3-030-05317-8 978-3-030-05318-5},
language = {English},
series = {The {{Springer Series}} on {{Challenges}} in {{Machine Learning}}}
}
@book{igualIntroductionDataScience2017,
title = {Introduction to {{Data Science}}},
author = {Igual, Laura and Segu{\'i}, Santi},
year = {2017},
publisher = {{Springer International Publishing}},
address = {{Cham}},
doi = {10.1007/978-3-319-50017-1},
isbn = {978-3-319-50016-4 978-3-319-50017-1},
language = {English},
series = {Undergraduate {{Topics}} in {{Computer Science}}}
}
@article{isolaImagetoImageTranslationConditional2018,
title = {Image-to-{{Image Translation}} with {{Conditional Adversarial Networks}}},
author = {Isola, Phillip and Zhu, Jun-Yan and Zhou, Tinghui and Efros, Alexei A.},
year = {2018},
month = nov,
abstract = {We investigate conditional adversarial networks as a general-purpose solution to image-to-image translation problems. These networks not only learn the mapping from input image to output image, but also learn a loss function to train this mapping. This makes it possible to apply the same generic approach to problems that traditionally would require very different loss formulations. We demonstrate that this approach is effective at synthesizing photos from label maps, reconstructing objects from edge maps, and colorizing images, among other tasks. Indeed, since the release of the pix2pix software associated with this paper, a large number of internet users (many of them artists) have posted their own experiments with our system, further demonstrating its wide applicability and ease of adoption without the need for parameter tweaking. As a community, we no longer hand-engineer our mapping functions, and this work suggests we can achieve reasonable results without hand-engineering our loss functions either.},
archiveprefix = {arXiv},
eprint = {1611.07004},
eprinttype = {arxiv},
journal = {arXiv:1611.07004 [cs]},
keywords = {Computer Science - Computer Vision and Pattern Recognition},
language = {English},
primaryclass = {cs}
}
@article{iulianoEstimatesGlobalSeasonal2018,
title = {Estimates of Global Seasonal Influenza-Associated Respiratory Mortality: {{A}} Modelling Study},
shorttitle = {Estimates of Global Seasonal Influenza-Associated Respiratory Mortality},
author = {Iuliano, A Danielle and Roguski, Katherine M and Chang, Howard H and Muscatello, David J and Palekar, Rakhee and Tempia, Stefano and Cohen, Cheryl and Gran, Jon Michael and Schanzer, Dena and Cowling, Benjamin J and Wu, Peng and Kyncl, Jan and Ang, Li Wei and Park, Minah and {Redlberger-Fritz}, Monika and Yu, Hongjie and Espenhain, Laura and Krishnan, Anand and Emukule, Gideon and {van Asten}, Liselotte and {Pereira da Silva}, Susana and Aungkulanon, Suchunya and Buchholz, Udo and Widdowson, Marc-Alain and Bresee, Joseph S and {Azziz-Baumgartner}, Eduardo and Cheng, Po-Yung and Dawood, Fatimah and Foppa, Ivo and Olsen, Sonja and Haber, Michael and Jeffers, Caprichia and MacIntyre, C Raina and Newall, Anthony T and Wood, James G and Kundi, Michael and {Popow-Kraupp}, Therese and Ahmed, Makhdum and Rahman, Mahmudur and Marinho, Fatima and Sotomayor Proschle, C Viviana and Vergara Mallegas, Natalia and Luzhao, Feng and Sa, Li and {Barbosa-Ram{\'i}rez}, Juliana and Sanchez, Diana Malo and Gomez, Leandra Abarca and Vargas, Xiomara Badilla and Acosta Herrera, aBetsy and Llan{\'e}s, Mar{\'i}a Josefa and Fischer, Thea K{\o}lsen and Krause, Tyra Grove and M{\o}lbak, K{\aa}re and Nielsen, Jens and Trebbien, Ramona and Bruno, Alfredo and Ojeda, Jenny and Ramos, Hector and {an der Heiden}, Matthias and {del Carmen Castillo Signor}, Leticia and Serrano, Carlos Enrique and Bhardwaj, Rohit and Chadha, Mandeep and Narayan, Venkatesh and Kosen, Soewarta and Bromberg, Michal and {Glatman-Freedman}, Aharona and Kaufman, Zalman and Arima, Yuzo and Oishi, Kazunori and Chaves, Sandra and Nyawanda, Bryan and {Al-Jarallah}, Reem Abdullah and {Kuri-Morales}, Pablo A and Matus, Cuitl{\'a}huac Ruiz and Corona, Maria Eugenia Jimenez and Burmaa, Alexander and Darmaa, Oyungerel and Obtel, Majdouline and Cherkaoui, Imad and {van den Wijngaard}, Cees C and {van der Hoek}, Wim and Baker, Michael and Bandaranayake, Don and Bissielo, Ange and Huang, Sue and Lopez, Liza and Newbern, Claire and Flem, Elmira and Gr{\o}neng, Gry M and Hauge, Siri and {de Cos{\'i}o}, Federico G and {de Molt{\'o}}, Yadira and Castillo, Lourdes Moreno and Cabello, Maria Agueda and {von Horoch}, Marta and Medina Osis, Jose and Machado, Ausenda and Nunes, Baltazar and Rodrigues, Ana Paula and Rodrigues, Emanuel and Calomfirescu, Cristian and Lupulescu, Emilia and Popescu, Rodica and Popovici, Odette and Bogdanovic, Dragan and Kostic, Marina and Lazarevic, Konstansa and Milosevic, Zoran and Tiodorovic, Branislav and Chen, Mark and Cutter, Jeffery and Lee, Vernon and Lin, Raymond and Ma, Stefan and Cohen, Adam L and Treurnicht, Florette and Kim, Woo Joo and {Delgado-Sanz}, Concha and {de mateo Onta{\~n}{\'o}n}, Salvador and Larrauri, Amparo and Le{\'o}n, Inmaculada Le{\'o}n and Vallejo, Fernando and Born, Rita and Junker, Christoph and Koch, Daniel and Chuang, Jen-Hsiang and Huang, Wan-Ting and Kuo, Hung-Wei and Tsai, Yi-Chen and Bundhamcharoen, Kanitta and Chittaganpitch, Malinee and Green, Helen K and Pebody, Richard and Go{\~n}i, Natalia and Chiparelli, Hector and Brammer, Lynnette and Mustaquim, Desiree},
year = {2018},
month = mar,
volume = {391},
pages = {1285--1300},
issn = {0140-6736},
doi = {10.1016/S0140-6736(17)33293-2},
abstract = {Background Estimates of influenza-associated mortality are important for national and international decision making on public health priorities. Previous estimates of 250,000\textendash 500,000 annual influenza deaths are outdated. We updated the estimated number of global annual influenza-associated respiratory deaths using country-specific influenza-associated excess respiratory mortality estimates from 1999\textendash 2015. Methods We estimated country-specific influenza-associated respiratory excess mortality rates (EMR) for 33 countries using time series log-linear regression models with vital death records and influenza surveillance data. To extrapolate estimates to countries without data, we divided countries into three analytic divisions for three age groups ({$<$}65 years, 65\textendash 74 years, and {$\geq$}75 years) using WHO Global Health Estimate (GHE) respiratory infection mortality rates. We calculated mortality rate ratios (MRR) to account for differences in risk of influenza death across countries by comparing GHE respiratory infection mortality rates from countries without EMR estimates with those with estimates. To calculate death estimates for individual countries within each age-specific analytic division, we multiplied randomly selected mean annual EMRs by the country's MRR and population. Global 95\% credible interval (CrI) estimates were obtained from the posterior distribution of the sum of country-specific estimates to represent the range of possible influenza-associated deaths in a season or year. We calculated influenza-associated deaths for children younger than 5 years for 92 countries with high rates of mortality due to respiratory infection using the same methods. Findings EMR-contributing countries represented 57\% of the global population. The estimated mean annual influenza-associated respiratory EMR ranged from 0{$\cdot$}1 to 6{$\cdot$}4 per 100,000 individuals for people younger than 65 years, 2{$\cdot$}9 to 44{$\cdot$}0 per 100,000 individuals for people aged between 65 and 74 years, and 17{$\cdot$}9 to 223{$\cdot$}5 per 100,000 for people older than 75 years. We estimated that 291,243\textendash 645,832 seasonal influenza-associated respiratory deaths (4{$\cdot$}0\textendash 8{$\cdot$}8 per 100,000 individuals) occur annually. The highest mortality rates were estimated in sub-Saharan Africa (2{$\cdot$}8\textendash 16{$\cdot$}5 per 100,000 individuals), southeast Asia (3{$\cdot$}5\textendash 9{$\cdot$}2 per 100,000 individuals), and among people aged 75 years or older (51{$\cdot$}3\textendash 99{$\cdot$}4 per 100,000 individuals). For 92 countries, we estimated that among children younger than 5 years, 9243\textendash 105,690 influenza-associated respiratory deaths occur annually. Interpretation These global influenza-associated respiratory mortality estimates are higher than previously reported, suggesting that previous estimates might have underestimated disease burden. The contribution of non-respiratory causes of death to global influenza-associated mortality should be investigated. Funding None.},
journal = {The Lancet},
language = {English},
number = {10127}
}
@book{jamesIntroductionStatisticalLearning2013,
title = {An {{Introduction}} to {{Statistical Learning}}},
author = {James, Gareth and Witten, Daniela and Hastie, Trevor and Tibshirani, Robert},
year = {2013},
volume = {103},
publisher = {{Springer New York}},
address = {{New York, NY}},
doi = {10.1007/978-1-4614-7138-7},
isbn = {978-1-4614-7137-0 978-1-4614-7138-7},
language = {English},
series = {Springer {{Texts}} in {{Statistics}}}
}
@article{jiangTSITSimpleVersatile2020,
title = {{{TSIT}}: {{A Simple}} and {{Versatile Framework}} for {{Image}}-to-{{Image Translation}}},
shorttitle = {{{TSIT}}},
author = {Jiang, Liming and Zhang, Changxu and Huang, Mingyang and Liu, Chunxiao and Shi, Jianping and Loy, Chen Change},
year = {2020},
month = jul,
abstract = {We introduce a simple and versatile framework for image-to-image translation. We unearth the importance of normalization layers, and provide a carefully designed two-stream generative model with newly proposed feature transformations in a coarse-to-fine fashion. This allows multi-scale semantic structure information and style representation to be effectively captured and fused by the network, permitting our method to scale to various tasks in both unsupervised and supervised settings. No additional constraints (e.g., cycle consistency) are needed, contributing to a very clean and simple method. Multi-modal image synthesis with arbitrary style control is made possible. A systematic study compares the proposed method with several state-of-the-art task-specific baselines, verifying its effectiveness in both perceptual quality and quantitative evaluations. GitHub: https://github.com/EndlessSora/TSIT.},
archiveprefix = {arXiv},
eprint = {2007.12072},
eprinttype = {arxiv},
journal = {arXiv:2007.12072 [cs, eess]},
keywords = {Computer Science - Computer Vision and Pattern Recognition,Computer Science - Machine Learning,Electrical Engineering and Systems Science - Image and Video Processing},
language = {English},
primaryclass = {cs, eess}
}
@article{karrasStyleBasedGeneratorArchitecture2019,
title = {A {{Style}}-{{Based Generator Architecture}} for {{Generative Adversarial Networks}}},
author = {Karras, Tero and Laine, Samuli and Aila, Timo},
year = {2019},
month = mar,
abstract = {We propose an alternative generator architecture for generative adversarial networks, borrowing from style transfer literature. The new architecture leads to an automatically learned, unsupervised separation of high-level attributes (e.g., pose and identity when trained on human faces) and stochastic variation in the generated images (e.g., freckles, hair), and it enables intuitive, scale-specific control of the synthesis. The new generator improves the state-of-the-art in terms of traditional distribution quality metrics, leads to demonstrably better interpolation properties, and also better disentangles the latent factors of variation. To quantify interpolation quality and disentanglement, we propose two new, automated methods that are applicable to any generator architecture. Finally, we introduce a new, highly varied and high-quality dataset of human faces.},
archiveprefix = {arXiv},
eprint = {1812.04948},
eprinttype = {arxiv},
journal = {arXiv:1812.04948 [cs, stat]},
keywords = {Computer Science - Machine Learning,Computer Science - Neural and Evolutionary Computing,Statistics - Machine Learning},
language = {English},
primaryclass = {cs, stat}
}
@article{kingmaAdamMethodStochastic2017,
title = {Adam: {{A Method}} for {{Stochastic Optimization}}},
shorttitle = {Adam},
author = {Kingma, Diederik P. and Ba, Jimmy},
year = {2017},
month = jan,
abstract = {We introduce Adam, an algorithm for first-order gradient-based optimization of stochastic objective functions, based on adaptive estimates of lower-order moments. The method is straightforward to implement, is computationally efficient, has little memory requirements, is invariant to diagonal rescaling of the gradients, and is well suited for problems that are large in terms of data and/or parameters. The method is also appropriate for non-stationary objectives and problems with very noisy and/or sparse gradients. The hyper-parameters have intuitive interpretations and typically require little tuning. Some connections to related algorithms, on which Adam was inspired, are discussed. We also analyze the theoretical convergence properties of the algorithm and provide a regret bound on the convergence rate that is comparable to the best known results under the online convex optimization framework. Empirical results demonstrate that Adam works well in practice and compares favorably to other stochastic optimization methods. Finally, we discuss AdaMax, a variant of Adam based on the infinity norm.},
archiveprefix = {arXiv},
eprint = {1412.6980},
eprinttype = {arxiv},
journal = {arXiv:1412.6980 [cs]},
keywords = {Computer Science - Machine Learning},
language = {English},
primaryclass = {cs}
}
@article{larsenAutoencodingPixelsUsing2016,
title = {Autoencoding beyond Pixels Using a Learned Similarity Metric},
author = {Larsen, Anders Boesen Lindbo and S{\o}nderby, S{\o}ren Kaae and Larochelle, Hugo and Winther, Ole},
year = {2016},
month = feb,
abstract = {We present an autoencoder that leverages learned representations to better measure similarities in data space. By combining a variational autoencoder with a generative adversarial network we can use learned feature representations in the GAN discriminator as basis for the VAE reconstruction objective. Thereby, we replace element-wise errors with feature-wise errors to better capture the data distribution while offering invariance towards e.g. translation. We apply our method to images of faces and show that it outperforms VAEs with element-wise similarity measures in terms of visual fidelity. Moreover, we show that the method learns an embedding in which high-level abstract visual features (e.g. wearing glasses) can be modified using simple arithmetic.},
archiveprefix = {arXiv},
eprint = {1512.09300},
eprinttype = {arxiv},
journal = {arXiv:1512.09300 [cs, stat]},
keywords = {Computer Science - Computer Vision and Pattern Recognition,Computer Science - Machine Learning,Statistics - Machine Learning},
language = {English},
primaryclass = {cs, stat}
}
@article{lecunGradientBasedLearningApplied1998,
title = {Gradient-{{Based Learning Applied}} to {{Document Recognition}}},
author = {LeCun, Y. and Bottou, L. and Bengio, Y. and Haffner, P.},
year = {1998},
volume = {86},
pages = {2278--2324},
doi = {10.1109/5.726791},
journal = {Proceedings of the IEEE},
number = {11}
}
@article{leeMaskGANDiverseInteractive2020,
title = {{{MaskGAN}}: {{Towards Diverse}} and {{Interactive Facial Image Manipulation}}},
shorttitle = {{{MaskGAN}}},
author = {Lee, Cheng-Han and Liu, Ziwei and Wu, Lingyun and Luo, Ping},
year = {2020},
month = apr,
abstract = {Facial image manipulation has achieved great progress in recent years. However, previous methods either operate on a predefined set of face attributes or leave users little freedom to interactively manipulate images. To overcome these drawbacks, we propose a novel framework termed MaskGAN, enabling diverse and interactive face manipulation. Our key insight is that semantic masks serve as a suitable intermediate representation for flexible face manipulation with fidelity preservation. MaskGAN has two main components: 1) Dense Mapping Network (DMN) and 2) Editing Behavior Simulated Training (EBST). Specifically, DMN learns style mapping between a free-form user modified mask and a target image, enabling diverse generation results. EBST models the user editing behavior on the source mask, making the overall framework more robust to various manipulated inputs. Specifically, it introduces dual-editing consistency as the auxiliary supervision signal. To facilitate extensive studies, we construct a large-scale high-resolution face dataset with fine-grained mask annotations named CelebAMask-HQ. MaskGAN is comprehensively evaluated on two challenging tasks: attribute transfer and style copy, demonstrating superior performance over other state-of-the-art methods. The code, models, and dataset are available at https://github.com/switchablenorms/CelebAMask-HQ.},
archiveprefix = {arXiv},
eprint = {1907.11922},
eprinttype = {arxiv},
journal = {arXiv:1907.11922 [cs]},
keywords = {Computer Science - Computer Vision and Pattern Recognition,Computer Science - Graphics,Computer Science - Machine Learning},
language = {English},
primaryclass = {cs}
}
@article{limGeometricGAN2017,
title = {Geometric {{GAN}}},
author = {Lim, Jae Hyun and Ye, Jong Chul},
year = {2017},
month = may,
abstract = {Generative Adversarial Nets (GANs) represent an important milestone for effective generative models, which has inspired numerous variants seemingly different from each other. One of the main contributions of this paper is to reveal a unified geometric structure in GAN and its variants. Specifically, we show that the adversarial generative model training can be decomposed into three geometric steps: separating hyperplane search, discriminator parameter update away from the separating hyperplane, and the generator update along the normal vector direction of the separating hyperplane. This geometric intuition reveals the limitations of the existing approaches and leads us to propose a new formulation called geometric GAN using SVM separating hyperplane that maximizes the margin. Our theoretical analysis shows that the geometric GAN converges to a Nash equilibrium between the discriminator and generator. In addition, extensive numerical results show that the superior performance of geometric GAN.},
archiveprefix = {arXiv},
eprint = {1705.02894},
eprinttype = {arxiv},
journal = {arXiv:1705.02894 [cond-mat, stat]},
keywords = {Computer Science - Artificial Intelligence,Computer Science - Computer Vision and Pattern Recognition,Computer Science - Machine Learning,Condensed Matter - Disordered Systems and Neural Networks,Statistics - Machine Learning},
language = {English},
primaryclass = {cond-mat, stat}
}
@article{limGeometricGAN2017a,
title = {Geometric {{GAN}}},
author = {Lim, Jae Hyun and Ye, Jong Chul},
year = {2017},
month = may,
abstract = {Generative Adversarial Nets (GANs) represent an important milestone for effective generative models, which has inspired numerous variants seemingly different from each other. One of the main contributions of this paper is to reveal a unified geometric structure in GAN and its variants. Specifically, we show that the adversarial generative model training can be decomposed into three geometric steps: separating hyperplane search, discriminator parameter update away from the separating hyperplane, and the generator update along the normal vector direction of the separating hyperplane. This geometric intuition reveals the limitations of the existing approaches and leads us to propose a new formulation called geometric GAN using SVM separating hyperplane that maximizes the margin. Our theoretical analysis shows that the geometric GAN converges to a Nash equilibrium between the discriminator and generator. In addition, extensive numerical results show that the superior performance of geometric GAN.},
archiveprefix = {arXiv},
eprint = {1705.02894},
eprinttype = {arxiv},
journal = {arXiv:1705.02894 [cond-mat, stat]},
keywords = {Computer Science - Artificial Intelligence,Computer Science - Computer Vision and Pattern Recognition,Computer Science - Machine Learning,Condensed Matter - Disordered Systems and Neural Networks,Statistics - Machine Learning},
language = {English},
primaryclass = {cond-mat, stat}
}
@article{liPrecomputedRealTimeTexture2016,
title = {Precomputed {{Real}}-{{Time Texture Synthesis}} with {{Markovian Generative Adversarial Networks}}},
author = {Li, Chuan and Wand, Michael},
year = {2016},
month = apr,
abstract = {This paper proposes Markovian Generative Adversarial Networks (MGANs), a method for training generative neural networks for efficient texture synthesis. While deep neural network approaches have recently demonstrated remarkable results in terms of synthesis quality, they still come at considerable computational costs (minutes of run-time for low-res images). Our paper addresses this efficiency issue. Instead of a numerical deconvolution in previous work, we precompute a feedforward, strided convolutional network that captures the feature statistics of Markovian patches and is able to directly generate outputs of arbitrary dimensions. Such network can directly decode brown noise to realistic texture, or photos to artistic paintings. With adversarial training, we obtain quality comparable to recent neural texture synthesis methods. As no optimization is required any longer at generation time, our run-time performance (0.25M pixel images at 25Hz) surpasses previous neural texture synthesizers by a significant margin (at least 500 times faster). We apply this idea to texture synthesis, style transfer, and video stylization.},
archiveprefix = {arXiv},
eprint = {1604.04382},
eprinttype = {arxiv},
journal = {arXiv:1604.04382 [cs]},
keywords = {Computer Science - Computer Vision and Pattern Recognition},
language = {English},
primaryclass = {cs}
}
@article{liPrecomputedRealTimeTexture2016a,
title = {Precomputed {{Real}}-{{Time Texture Synthesis}} with {{Markovian Generative Adversarial Networks}}},
author = {Li, Chuan and Wand, Michael},
year = {2016},
month = apr,
abstract = {This paper proposes Markovian Generative Adversarial Networks (MGANs), a method for training generative neural networks for efficient texture synthesis. While deep neural network approaches have recently demonstrated remarkable results in terms of synthesis quality, they still come at considerable computational costs (minutes of run-time for low-res images). Our paper addresses this efficiency issue. Instead of a numerical deconvolution in previous work, we precompute a feedforward, strided convolutional network that captures the feature statistics of Markovian patches and is able to directly generate outputs of arbitrary dimensions. Such network can directly decode brown noise to realistic texture, or photos to artistic paintings. With adversarial training, we obtain quality comparable to recent neural texture synthesis methods. As no optimization is required any longer at generation time, our run-time performance (0.25M pixel images at 25Hz) surpasses previous neural texture synthesizers by a significant margin (at least 500 times faster). We apply this idea to texture synthesis, style transfer, and video stylization.},
archiveprefix = {arXiv},
eprint = {1604.04382},
eprinttype = {arxiv},
journal = {arXiv:1604.04382 [cs]},
keywords = {Computer Science - Computer Vision and Pattern Recognition},
language = {English},
primaryclass = {cs}
}
@article{liuLiquidWarpingGAN2020,
title = {Liquid {{Warping GAN}} with {{Attention}}: {{A Unified Framework}} for {{Human Image Synthesis}}},
shorttitle = {Liquid {{Warping GAN}} with {{Attention}}},
author = {Liu, Wen and Piao, Zhixin and Tu, Zhi and Luo, Wenhan and Ma, Lin and Gao, Shenghua},
year = {2020},
month = nov,
abstract = {We tackle human image synthesis, including human motion imitation, appearance transfer, and novel view synthesis, within a unified framework. It means that the model, once being trained, can be used to handle all these tasks. The existing task-specific methods mainly use 2D keypoints (pose) to estimate the human body structure. However, they only express the position information with no abilities to characterize the personalized shape of the person and model the limb rotations. In this paper, we propose to use a 3D body mesh recovery module to disentangle the pose and shape. It can not only model the joint location and rotation but also characterize the personalized body shape. To preserve the source information, such as texture, style, color, and face identity, we propose an Attentional Liquid Warping GAN with Attentional Liquid Warping Block (AttLWB) that propagates the source information in both image and feature spaces to the synthesized reference. Specifically, the source features are extracted by a denoising convolutional auto-encoder for characterizing the source identity well. Furthermore, our proposed method can support a more flexible warping from multiple sources. To further improve the generalization ability of the unseen source images, a one/few-shot adversarial learning is applied. In detail, it firstly trains a model in an extensive training set. Then, it finetunes the model by one/few-shot unseen image(s) in a self-supervised way to generate high-resolution (512 \texttimes{} 512 and 1024 \texttimes{} 1024) results. Also, we build a new dataset, namely Impersonator (iPER) dataset, for the evaluation of human motion imitation, appearance transfer, and novel view synthesis. Extensive experiments demonstrate the effectiveness of our methods in terms of preserving face identity, shape consistency, and clothes details. All codes and dataset are available on https://impersonator.org/work/impersonator-plus-plus.html.},
archiveprefix = {arXiv},
eprint = {2011.09055},
eprinttype = {arxiv},
journal = {arXiv:2011.09055 [cs]},
keywords = {Computer Science - Computer Vision and Pattern Recognition},
language = {English},
primaryclass = {cs}
}
@article{lucicAreGANsCreated,
title = {Are {{GANs Created Equal}}? {{A Large}}-{{Scale Study}}},
author = {Lucic, Mario and Kurach, Karol and Michalski, Marcin and Gelly, Sylvain and Bousquet, Olivier},
pages = {10},
abstract = {Generative adversarial networks (GAN) are a powerful subclass of generative models. Despite a very rich research activity leading to numerous interesting GAN algorithms, it is still very hard to assess which algorithm(s) perform better than others. We conduct a neutral, multi-faceted large-scale empirical study on state-of-the art models and evaluation measures. We find that most models can reach similar scores with enough hyperparameter optimization and random restarts. This suggests that improvements can arise from a higher computational budget and tuning more than fundamental algorithmic changes. To overcome some limitations of the current metrics, we also propose several data sets on which precision and recall can be computed. Our experimental results suggest that future GAN research should be based on more systematic and objective evaluation procedures. Finally, we did not find evidence that any of the tested algorithms consistently outperforms the non-saturating GAN introduced in [9].},
language = {English}
}
@article{maoLeastSquaresGenerative2017,
title = {Least {{Squares Generative Adversarial Networks}}},
author = {Mao, Xudong and Li, Qing and Xie, Haoran and Lau, Raymond Y. K. and Wang, Zhen and Smolley, Stephen Paul},
year = {2017},
month = apr,
abstract = {Unsupervised learning with generative adversarial networks (GANs) has proven hugely successful. Regular GANs hypothesize the discriminator as a classifier with the sigmoid cross entropy loss function. However, we found that this loss function may lead to the vanishing gradients problem during the learning process. To overcome such a problem, we propose in this paper the Least Squares Generative Adversarial Networks (LSGANs) which adopt the least squares loss function for the discriminator. We show that minimizing the objective function of LSGAN yields minimizing the Pearson {$\chi$}2 divergence. There are two benefits of LSGANs over regular GANs. First, LSGANs are able to generate higher quality images than regular GANs. Second, LSGANs perform more stable during the learning process. We evaluate LSGANs on five scene datasets and the experimental results show that the images generated by LSGANs are of better quality than the ones generated by regular GANs. We also conduct two comparison experiments between LSGANs and regular GANs to illustrate the stability of LSGANs.},
archiveprefix = {arXiv},
eprint = {1611.04076},
eprinttype = {arxiv},
journal = {arXiv:1611.04076 [cs]},
keywords = {Computer Science - Computer Vision and Pattern Recognition},
language = {English},
primaryclass = {cs}
}
@article{maoLeastSquaresGenerative2017a,
title = {Least {{Squares Generative Adversarial Networks}}},
author = {Mao, Xudong and Li, Qing and Xie, Haoran and Lau, Raymond Y. K. and Wang, Zhen and Smolley, Stephen Paul},
year = {2017},
month = apr,
abstract = {Unsupervised learning with generative adversarial networks (GANs) has proven hugely successful. Regular GANs hypothesize the discriminator as a classifier with the sigmoid cross entropy loss function. However, we found that this loss function may lead to the vanishing gradients problem during the learning process. To overcome such a problem, we propose in this paper the Least Squares Generative Adversarial Networks (LSGANs) which adopt the least squares loss function for the discriminator. We show that minimizing the objective function of LSGAN yields minimizing the Pearson {$\chi$}2 divergence. There are two benefits of LSGANs over regular GANs. First, LSGANs are able to generate higher quality images than regular GANs. Second, LSGANs perform more stable during the learning process. We evaluate LSGANs on five scene datasets and the experimental results show that the images generated by LSGANs are of better quality than the ones generated by regular GANs. We also conduct two comparison experiments between LSGANs and regular GANs to illustrate the stability of LSGANs.},
archiveprefix = {arXiv},
eprint = {1611.04076},
eprinttype = {arxiv},
journal = {arXiv:1611.04076 [cs]},
keywords = {Computer Science - Computer Vision and Pattern Recognition},
language = {English},
primaryclass = {cs}
}
@article{maoLeastSquaresGenerative2017b,
title = {Least {{Squares Generative Adversarial Networks}}},
author = {Mao, Xudong and Li, Qing and Xie, Haoran and Lau, Raymond Y. K. and Wang, Zhen and Smolley, Stephen Paul},
year = {2017},
month = apr,
abstract = {Unsupervised learning with generative adversarial networks (GANs) has proven hugely successful. Regular GANs hypothesize the discriminator as a classifier with the sigmoid cross entropy loss function. However, we found that this loss function may lead to the vanishing gradients problem during the learning process. To overcome such a problem, we propose in this paper the Least Squares Generative Adversarial Networks (LSGANs) which adopt the least squares loss function for the discriminator. We show that minimizing the objective function of LSGAN yields minimizing the Pearson {$\chi$}2 divergence. There are two benefits of LSGANs over regular GANs. First, LSGANs are able to generate higher quality images than regular GANs. Second, LSGANs perform more stable during the learning process. We evaluate LSGANs on five scene datasets and the experimental results show that the images generated by LSGANs are of better quality than the ones generated by regular GANs. We also conduct two comparison experiments between LSGANs and regular GANs to illustrate the stability of LSGANs.},
archiveprefix = {arXiv},
eprint = {1611.04076},
eprinttype = {arxiv},
journal = {arXiv:1611.04076 [cs]},
keywords = {Computer Science - Computer Vision and Pattern Recognition},
language = {English},
primaryclass = {cs}
}
@incollection{meulendijkRiskMediationAssociation2017,
title = {Risk {{Mediation}} in {{Association Rules}}},
booktitle = {Artificial {{Intelligence}} in {{Medicine}}},
author = {Meulendijk, Michiel C. and Spruit, Marco R. and Brinkkemper, Sjaak},
editor = {{ten Teije}, Annette and Popow, Christian and Holmes, John H. and Sacchi, Lucia},
year = {2017},
volume = {10259},
pages = {327--331},
publisher = {{Springer International Publishing}},
address = {{Cham}},
doi = {10.1007/978-3-319-59758-4_38},
isbn = {978-3-319-59757-7 978-3-319-59758-4},
language = {English}
}
@article{miyatoSpectralNormalizationGenerative2018,
title = {Spectral {{Normalization}} for {{Generative Adversarial Networks}}},
author = {Miyato, Takeru and Kataoka, Toshiki and Koyama, Masanori and Yoshida, Yuichi},
year = {2018},
month = feb,
abstract = {One of the challenges in the study of generative adversarial networks is the instability of its training. In this paper, we propose a novel weight normalization technique called spectral normalization to stabilize the training of the discriminator. Our new normalization technique is computationally light and easy to incorporate into existing implementations. We tested the efficacy of spectral normalization on CIFAR10, STL-10, and ILSVRC2012 dataset, and we experimentally confirmed that spectrally normalized GANs (SN-GANs) is capable of generating images of better or equal quality relative to the previous training stabilization techniques. The code with Chainer (Tokui et al., 2015), generated images and pretrained models are available at https://github.com/pfnet-research/sngan\_projection.},
archiveprefix = {arXiv},
eprint = {1802.05957},
eprinttype = {arxiv},
journal = {arXiv:1802.05957 [cs, stat]},
keywords = {Computer Science - Computer Vision and Pattern Recognition,Computer Science - Machine Learning,Statistics - Machine Learning},
language = {English},
primaryclass = {cs, stat}
}
@article{miyatoSpectralNormalizationGenerative2018a,
title = {Spectral {{Normalization}} for {{Generative Adversarial Networks}}},
author = {Miyato, Takeru and Kataoka, Toshiki and Koyama, Masanori and Yoshida, Yuichi},
year = {2018},
month = feb,
abstract = {One of the challenges in the study of generative adversarial networks is the instability of its training. In this paper, we propose a novel weight normalization technique called spectral normalization to stabilize the training of the discriminator. Our new normalization technique is computationally light and easy to incorporate into existing implementations. We tested the efficacy of spectral normalization on CIFAR10, STL-10, and ILSVRC2012 dataset, and we experimentally confirmed that spectrally normalized GANs (SN-GANs) is capable of generating images of better or equal quality relative to the previous training stabilization techniques. The code with Chainer (Tokui et al., 2015), generated images and pretrained models are available at https://github.com/pfnet-research/sngan\_projection.},
archiveprefix = {arXiv},
eprint = {1802.05957},
eprinttype = {arxiv},
journal = {arXiv:1802.05957 [cs, stat]},
keywords = {Computer Science - Computer Vision and Pattern Recognition,Computer Science - Machine Learning,Statistics - Machine Learning},
language = {English},
primaryclass = {cs, stat}
}
@book{mollerModellingComputingSystems2013,
title = {Modelling {{Computing Systems}}: {{Mathematics}} for {{Computer Science}}},
shorttitle = {Modelling {{Computing Systems}}},
author = {Moller, Faron and Struth, Georg},
year = {2013},
publisher = {{Springer-Verlag}},
address = {{London}},
doi = {10.1007/978-1-84800-322-4},
abstract = {This engaging text presents the fundamental mathematics and modelling techniques for computing systems in a novel and light-hearted way, which can be easily followed by students at the very beginning of their university education. Key concepts are taught through a large collection of challenging yet fun mathematical games and logical puzzles that require no prior knowledge about computers. The text begins with intuition and examples as a basis from which precise concepts are then developed; demonstrating how, by working within the confines of a precise structured method, the occurrence of errors in the system can be drastically reduced. Features: demonstrates how game theory provides a paradigm for an intuitive understanding of the nature of computation; contains more than 400 exercises throughout the text, with detailed solutions to half of these presented at the end of the book, together with numerous theorems, definitions and examples; describes a modelling approach based on state transition systems.},
isbn = {978-1-84800-321-7},
language = {English},
series = {Undergraduate {{Topics}} in {{Computer Science}}}
}
@incollection{mosteiroMakingSenseViolence2020,
title = {Making {{Sense}} of {{Violence Risk Predictions Using Clinical Notes}}},
booktitle = {Health {{Information Science}}},
author = {Mosteiro, Pablo and Rijcken, Emil and Zervanou, Kalliopi and Kaymak, Uzay and Scheepers, Floortje and Spruit, Marco},
editor = {Huang, Zhisheng and Siuly, Siuly and Wang, Hua and Zhou, Rui and Zhang, Yanchun},
year = {2020},
volume = {12435},
pages = {3--14},
publisher = {{Springer International Publishing}},
address = {{Cham}},
doi = {10.1007/978-3-030-61951-0_1},
isbn = {978-3-030-61950-3 978-3-030-61951-0},
language = {English}
}
@techreport{nistbigdatapublicworkinggroupdefinitionsandtaxonomiessubgroupNISTBigData2015,
title = {{{NIST Big Data Interoperability Framework}}: {{Volume}} 1, {{Definitions}}},
shorttitle = {{{NIST Big Data Interoperability Framework}}},
author = {{NIST Big Data Public Working Group Definitions and Taxonomies Subgroup}},
year = {2015},
month = oct,
pages = {NIST SP 1500-1},
institution = {{National Institute of Standards and Technology}},
doi = {10.6028/NIST.SP.1500-1},
abstract = {Big Data is a term used to describe the large amount of data in the networked, digitized, sensor-laden, information-driven world. While opportunities exist with Big Data, the data can overwhelm traditional technical approaches and the growth of data is outpacing scientific and technological advances in data analytics. To advance progress in Big Data, the NIST Big Data Public Working Group (NBD-PWG) is working to develop consensus on important, fundamental concepts related to Big Data. The results are reported in the NIST Big Data Interoperability Framework series of volumes. This volume, Volume 1, contains a definition of Big Data and related terms necessary to lay the groundwork for discussions surrounding Big Data.},
language = {English},
number = {NIST SP 1500-1}
}
@article{oomsSelfServiceData,
title = {Self-{{Service Data Science}} in {{Healthcare}}},
author = {Ooms, Richard},
pages = {40},
language = {English}
}
@article{oomsSelfServiceDataScience2020,
title = {Self-{{Service Data Science}} in {{Healthcare}} with {{Automated Machine Learning}}},
author = {Ooms, Richard and Spruit, Marco},
year = {2020},
month = apr,
volume = {10},
pages = {2992},
issn = {2076-3417},
doi = {10.3390/app10092992},
abstract = {(1) Background: This work investigates whether and how researcher-physicians can be supported in their knowledge discovery process by employing Automated Machine Learning (AutoML). (2) Methods: We take a design science research approach and select the Tree-based Pipeline Optimization Tool (TPOT) as the AutoML method based on a benchmark test and requirements from researcher-physicians. We then integrate TPOT into two artefacts: a web application and a notebook. We evaluate these artefacts with researcher-physicians to examine which approach suits researcher-physicians best. Both artefacts have a similar workflow, but different user interfaces because of a conflict in requirements. (3) Results: Artefact A, a web application, was perceived as better for uploading a dataset and comparing results. Artefact B, a Jupyter notebook, was perceived as better regarding the workflow and being in control of model construction. (4) Conclusions: Thus, a hybrid artefact would be best for researcher-physicians. However, both artefacts missed model explainability and an explanation of variable importance for their created models. Hence, deployment of AutoML technologies in healthcare remains currently limited to the exploratory data analysis phase.},
journal = {Applied Sciences},
language = {English},
number = {9}
}
@inproceedings{ottFindingDeceptiveOpinion2011,
title = {Finding {{Deceptive Opinion Spam}} by {{Any Stretch}} of the {{Imagination}}},
booktitle = {Proceedings of the 49th {{Annual Meeting}} of the {{Association}} for {{Computational Linguistics}}: {{Human Language Technologies}}},
author = {Ott, Myle and Choi, Yejin and Cardie, Claire and Hancock, Jeffrey T.},
year = {2011},
month = jun,
pages = {309--319},
publisher = {{Association for Computational Linguistics}},
address = {{Portland, Oregon, USA}}
}
@inproceedings{ottNegativeDeceptiveOpinion2013,
title = {Negative {{Deceptive Opinion Spam}}},
booktitle = {Proceedings of the 2013 {{Conference}} of the {{North American Chapter}} of the {{Association}} for {{Computational Linguistics}}: {{Human Language Technologies}}},
author = {Ott, Myle and Cardie, Claire and Hancock, Jeffrey T},
year = {2013},
pages = {497--501}
}
@article{parkSemanticImageSynthesis2019,
title = {Semantic {{Image Synthesis}} with {{Spatially}}-{{Adaptive Normalization}}},
author = {Park, Taesung and Liu, Ming-Yu and Wang, Ting-Chun and Zhu, Jun-Yan},
year = {2019},
month = nov,
abstract = {We propose spatially-adaptive normalization, a simple but effective layer for synthesizing photorealistic images given an input semantic layout. Previous methods directly feed the semantic layout as input to the deep network, which is then processed through stacks of convolution, normalization, and nonlinearity layers. We show that this is suboptimal as the normalization layers tend to ``wash away'' semantic information. To address the issue, we propose using the input layout for modulating the activations in normalization layers through a spatially-adaptive, learned transformation. Experiments on several challenging datasets demonstrate the advantage of the proposed method over existing approaches, regarding both visual fidelity and alignment with input layouts. Finally, our model allows user control over both semantic and style. Code is available at https://github.com/NVlabs/SPADE .},
archiveprefix = {arXiv},
eprint = {1903.07291},
eprinttype = {arxiv},
journal = {arXiv:1903.07291 [cs]},
keywords = {Computer Science - Artificial Intelligence,Computer Science - Computer Vision and Pattern Recognition,Computer Science - Graphics,Computer Science - Machine Learning,I.3.3,I.5,I.5.4},
language = {English},
primaryclass = {cs}
}
@article{porterAlgorithmSuffixStripping2006,
title = {An {{Algorithm}} for {{Suffix Stripping}}},
author = {Porter, M.F.},
year = {2006},
month = jul,
volume = {40},
pages = {211--218},
publisher = {{Emerald}},
doi = {10.1108/00330330610681286},
journal = {Program},
number = {3}
}
@article{qahtanDataScienceSociety,
title = {Data {{Science}} and {{Society}}: {{Unsupervised Learning}}},
author = {Qahtan, Hakim},
pages = {40},
language = {English}
}
@article{qahtanDataScienceSocietya,
title = {Data {{Science}} and {{Society}}: {{Regression}}},
author = {Qahtan, Hakim},
pages = {34},
language = {English}
}
@article{qahtanDssStatInference,
title = {{{DSS}} {{Stat Inference}}},
author = {Qahtan, Hakim}
}
@article{ronnebergerUNetConvolutionalNetworks2015,
title = {U-{{Net}}: {{Convolutional Networks}} for {{Biomedical Image Segmentation}}},
shorttitle = {U-{{Net}}},
author = {Ronneberger, Olaf and Fischer, Philipp and Brox, Thomas},
year = {2015},
month = may,
abstract = {There is large consent that successful training of deep networks requires many thousand annotated training samples. In this paper, we present a network and training strategy that relies on the strong use of data augmentation to use the available annotated samples more efficiently. The architecture consists of a contracting path to capture context and a symmetric expanding path that enables precise localization. We show that such a network can be trained end-to-end from very few images and outperforms the prior best method (a sliding-window convolutional network) on the ISBI challenge for segmentation of neuronal structures in electron microscopic stacks. Using the same network trained on transmitted light microscopy images (phase contrast and DIC) we won the ISBI cell tracking challenge 2015 in these categories by a large margin. Moreover, the network is fast. Segmentation of a 512x512 image takes less than a second on a recent GPU. The full implementation (based on Caffe) and the trained networks are available at http://lmb.informatik.uni-freiburg.de/people/ronneber/u-net.},
archiveprefix = {arXiv},
eprint = {1505.04597},
eprinttype = {arxiv},
journal = {arXiv:1505.04597 [cs]},
keywords = {Computer Science - Computer Vision and Pattern Recognition},
language = {English},
primaryclass = {cs}
}
@article{santurkarHowDoesBatch2019,
title = {How {{Does Batch Normalization Help Optimization}}?},
author = {Santurkar, Shibani and Tsipras, Dimitris and Ilyas, Andrew and Madry, Aleksander},
year = {2019},
month = apr,
abstract = {Batch Normalization (BatchNorm) is a widely adopted technique that enables faster and more stable training of deep neural networks (DNNs). Despite its pervasiveness, the exact reasons for BatchNorm's effectiveness are still poorly understood. The popular belief is that this effectiveness stems from controlling the change of the layers' input distributions during training to reduce the so-called ``internal covariate shift''. In this work, we demonstrate that such distributional stability of layer inputs has little to do with the success of BatchNorm. Instead, we uncover a more fundamental impact of BatchNorm on the training process: it makes the optimization landscape significantly smoother. This smoothness induces a more predictive and stable behavior of the gradients, allowing for faster training.},
archiveprefix = {arXiv},
eprint = {1805.11604},
eprinttype = {arxiv},
journal = {arXiv:1805.11604 [cs, stat]},
keywords = {Computer Science - Machine Learning,Computer Science - Neural and Evolutionary Computing,Statistics - Machine Learning},
language = {English},
primaryclass = {cs, stat}
}
@article{seddikNaturalLanguageProcessing,
title = {Natural {{Language Processing}} \& {{Word Embeddings}}},
author = {Seddik, Noha and Sarhan, Injy},
pages = {59},
language = {English}
}
@article{SignCreateFree,
title = {Sign {{Up}} \textendash{} {{Create}} a {{Free Account}} | {{Grammarly}}}
}
@article{SignCreateFreea,
title = {Sign {{Up}} \textendash{} {{Create}} a {{Free Account}} | {{Grammarly}}}
}
@article{SignCreateFreeb,
title = {Sign {{Up}} \textendash{} {{Create}} a {{Free Account}} | {{Grammarly}}}
}
@article{simoniMassCytometryPowerful2018,
title = {Mass Cytometry: {{A}} Powerful Tool for Dissecting the Immune Landscape},
shorttitle = {Mass Cytometry},
author = {Simoni, Yannick and Chng, Melissa Hui Yen and Li, Shamin and Fehlings, Michael and Newell, Evan W},
year = {2018},
month = apr,
volume = {51},
pages = {187--196},
issn = {0952-7915},
doi = {10.1016/j.coi.2018.03.023},
abstract = {Advancement in methodologies for single cell analysis has historically been a major driver of progress in immunology. Currently, high dimensional flow cytometry, mass cytometry and various forms of single cell sequencing-based analysis methods are being widely adopted to expose the staggering heterogeneity of immune cells in many contexts. Here, we focus on mass cytometry, a form of flow cytometry that allows for simultaneous interrogation of more than 40 different marker molecules, including cytokines and transcription factors, without the need for spectral compensation. We argue that mass cytometry occupies an important niche within the landscape of single-cell analysis platforms that enables the efficient and in-depth study of diverse immune cell subsets with an ability to zoom-in on myeloid and lymphoid compartments in various tissues in health and disease. We further discuss the unique features of mass cytometry that are favorable for combining multiplex peptide-MHC multimer technology and phenotypic characterization of antigen specific T cells. By referring to recent studies revealing the complexities of tumor immune infiltrates, we highlight the particular importance of this technology for studying cancer in the context of cancer immunotherapy. Finally, we provide thoughts on current technical limitations and how we imagine these being overcome.},
journal = {Current Opinion in Immunology},
language = {English}
}
@article{simonyanVeryDeepConvolutional2015,
title = {Very {{Deep Convolutional Networks}} for {{Large}}-{{Scale Image Recognition}}},
author = {Simonyan, Karen and Zisserman, Andrew},
year = {2015},
month = apr,
abstract = {In this work we investigate the effect of the convolutional network depth on its accuracy in the large-scale image recognition setting. Our main contribution is a thorough evaluation of networks of increasing depth using an architecture with very small (3 \texttimes{} 3) convolution filters, which shows that a significant improvement on the prior-art configurations can be achieved by pushing the depth to 16\textendash 19 weight layers. These findings were the basis of our ImageNet Challenge 2014 submission, where our team secured the first and the second places in the localisation and classification tracks respectively. We also show that our representations generalise well to other datasets, where they achieve state-of-the-art results. We have made our two best-performing ConvNet models publicly available to facilitate further research on the use of deep visual representations in computer vision.},
archiveprefix = {arXiv},
eprint = {1409.1556},
eprinttype = {arxiv},
journal = {arXiv:1409.1556 [cs]},
keywords = {Computer Science - Computer Vision and Pattern Recognition},
language = {English},
primaryclass = {cs}
}
@article{sobolevAdjuvantedInfluenzaH1N1Vaccination2016,
title = {Adjuvanted Influenza-{{H1N1}} Vaccination Reveals Lymphoid Signatures of Age-Dependent Early Responses and of Clinical Adverse Events},
author = {Sobolev, Olga and Binda, Elisa and O'Farrell, Sean and Lorenc, Anna and Pradines, Joel and Huang, Yongqing and Duffner, Jay and Schulz, Reiner and Cason, John and Zambon, Maria and Malim, Michael H and Peakman, Mark and Cope, Andrew and Capila, Ishan and Kaundinya, Ganesh V and Hayday, Adrian C},
year = {2016},
month = feb,
volume = {17},
pages = {204--213},
issn = {1529-2908, 1529-2916},
doi = {10.1038/ni.3328},
journal = {Nature Immunology},
language = {English},
number = {2}
}
@article{spruitAppliedDataScience,
title = {Applied {{Data Science}} for {{Student Empowerment}} and the {{Data Science}} \& {{Society Course}} at {{Utrecht University}}},
author = {Spruit, Marco},
pages = {60},
language = {English}
}
@article{spruitAppliedDataScience2018,
title = {Applied {{Data Science}} in {{Patient}}-{{Centric Healthcare}}: {{Adaptive Analytic Systems}} for {{Empowering Physicians}} and {{Patients}}},
shorttitle = {Applied {{Data Science}} in {{Patient}}-{{Centric Healthcare}}},
author = {Spruit, Marco and Lytras, Miltiadis},
year = {2018},
month = jul,
volume = {35},
pages = {643--653},
issn = {07365853},
doi = {10.1016/j.tele.2018.04.002},
abstract = {We define the emerging research field of applied data science as the knowledge discovery process in which analytic systems are designed and evaluated to improve the daily practices of domain experts. We investigate adaptive analytic systems as a novel research perspective of the three intertwining aspects within the knowledge discovery process in healthcare: domain and data understanding for physician- and patient-centric healthcare, data preprocessing and modelling using natural language processing and (big) data analytic techniques, and model evaluation and knowledge deployment through information infrastructures. We align these knowledge discovery aspects with the design science research steps of problem investigation, treatment design, and treatment validation, respectively. We note that the adaptive component in healthcare system prototypes may translate to data-driven personalisation aspects including personalised medicine. We explore how applied data science for patient-centric healthcare can thus empower physicians and patients to more effectively and efficiently improve healthcare. We propose meta-algorithmic modelling as a solution-oriented design science research framework in alignment with the knowledge discovery process to address the three key dilemmas in the emerging ``post-algorithmic era'' of data science: depth versus breadth, selection versus configuration, and accuracy versus transparency.},
journal = {Telematics and Informatics},
language = {English},
number = {4}
}
@article{spruitCRISPDCWCrossIndustryStandard2019,
title = {{{CRISP}}-{{DCW}}: {{The Cross}}-{{Industry Standard Process}} for {{Creating Distributed Computing Workflows}}},
author = {Spruit, Marco and Meijers, Stijn},
year = {2019},
pages = {31},
language = {English}
}
@article{spruitKnowledgeDiscoveryProcess,
title = {The {{Knowledge Discovery Process}} for {{Societal Impact}}},
author = {Spruit, Marco},
pages = {68},
language = {English}
}
@article{spruitMapreduceTutorial,
title = {{{MapReduce}} {{Tutorial}}},
author = {Spruit, Marco}
}
@article{spruitNaturalLanguageProcessing,
title = {Natural {{Language Processing}} ({{NLP}})},
author = {Spruit, Marco},
pages = {41},
language = {English}
}
@article{spruitNaturalLanguageProcessinga,
title = {Natural {{Language Processing}} ({{NLP}})},
author = {Spruit, Marco},
pages = {33},
language = {English}
}
@article{spruitNosqlIntroduction,
title = {{{NoSQL}} {{Introduction}}},
author = {Spruit, Marco}
}
@article{spruitRecommenderSystems,
title = {Recommender {{Systems}}},
author = {Spruit, Marco},
pages = {15},
language = {English}
}
@book{spruitSqlPresentationDss,
title = {{{SQL}} {{Presentation DSS}}},
author = {Spruit, Marco}
}
@article{spruitTrendsAppliedData,
title = {Trends in {{Applied Data Science}}},
author = {Spruit, Marco},
pages = {35},
language = {English}
}
@article{sridharCellularImmuneCorrelates2013,
title = {Cellular Immune Correlates of Protection against Symptomatic Pandemic Influenza},
author = {Sridhar, Saranya and Begom, Shaima and Bermingham, Alison and Hoschler, Katja and Adamson, Walt and Carman, William and Bean, Thomas and Barclay, Wendy and Deeks, Jonathan J and Lalvani, Ajit},
year = {2013},
month = oct,
volume = {19},
pages = {1305--1312},
issn = {1078-8956, 1546-170X},
doi = {10.1038/nm.3350},
journal = {Nature Medicine},
language = {English},
number = {10}
}
@article{suchDeepNeuroevolutionGenetic2018,
title = {Deep {{Neuroevolution}}: {{Genetic Algorithms Are}} a {{Competitive Alternative}} for {{Training Deep Neural Networks}} for {{Reinforcement Learning}}},
shorttitle = {Deep {{Neuroevolution}}},
author = {Such, Felipe Petroski and Madhavan, Vashisht and Conti, Edoardo and Lehman, Joel and Stanley, Kenneth O. and Clune, Jeff},
year = {2018},
month = apr,
abstract = {Deep artificial neural networks (DNNs) are typically trained via gradient-based learning algorithms, namely backpropagation. Evolution strategies (ES) can rival backprop-based algorithms such as Q-learning and policy gradients on challenging deep reinforcement learning (RL) problems. However, ES can be considered a gradient-based algorithm because it performs stochastic gradient descent via an operation similar to a finite-difference approximation of the gradient. That raises the question of whether non-gradient-based evolutionary algorithms can work at DNN scales. Here we demonstrate they can: we evolve the weights of a DNN with a simple, gradient-free, population-based genetic algorithm (GA) and it performs well on hard deep RL problems, including Atari and humanoid locomotion. The Deep GA successfully evolves networks with over four million free parameters, the largest neural networks ever evolved with a traditional evolutionary algorithm. These results (1) expand our sense of the scale at which GAs can operate, (2) suggest intriguingly that in some cases following the gradient is not the best choice for optimizing performance, and (3) make immediately available the multitude of neuroevolution techniques that improve performance. We demonstrate the latter by showing that combining DNNs with novelty search, which encourages exploration on tasks with deceptive or sparse reward functions, can solve a high-dimensional problem on which reward-maximizing algorithms (e.g. DQN, A3C, ES, and the GA) fail. Additionally, the Deep GA is faster than ES, A3C, and DQN (it can train Atari in \$\textbackslash sim\$4 hours on one desktop or \$\textbackslash sim\$1 hour distributed on 720 cores), and enables a state-of-the-art, up to 10,000-fold compact encoding technique.},
archiveprefix = {arXiv},
eprint = {1712.06567},
eprinttype = {arxiv},
journal = {arXiv:1712.06567 [cs]},
keywords = {Computer Science - Machine Learning,Computer Science - Neural and Evolutionary Computing},
language = {English},
primaryclass = {cs}
}
@article{syedMappingGlobalNetwork2019,
title = {Mapping the {{Global Network}} of {{Fisheries Science Collaboration}}},
author = {Syed, Shaheen and {n{\'i} Aodha}, Lia and Scougal, Callum and Spruit, Marco},
year = {2019},
month = sep,
volume = {20},
pages = {830--856},
issn = {1467-2960, 1467-2979},
doi = {10.1111/faf.12379},
abstract = {As socio-environmental problems have proliferated over the past decades, one narrative which has captured the attention of policymakers and scientists has been the need for collaborative research that spans traditional boundaries. Collaboration, it is argued, is imperative for solving these problems. Understanding how collaboration is occurring in practice is important, however, and may help explain the idea space across a field. In an effort to make sense of the shape of fisheries science, here we construct a co-authorship network of the field, from a data set comprising 73,240 scientific articles, drawn from 50 journals and published between 2000 and 2017. Using a combination of social network analysis and machine learning, the work first maps the global structure of scientific collaboration amongst fisheries scientists at the author, country and institutional levels. Second, it uncovers the hidden subgroups\textemdash{} here country clusters and communities of authors\textemdash{} within the network, detailing also the topical focus, publication outlets and relative impact of the largest fisheries science communities. We find that whilst the fisheries science network is becoming more geographically extensive, it is simultaneously becoming more intensive. The uncovered network exhibits characteristics suggestive of a thin style of collaboration, and groupings that are more regional than they are global. Although likely shaped by an array of overlapping micro- and macro-level factors, the analysis reveals a number of political\textendash{} economic patterns that merit reflection by both fisheries scientists and policymakers.},
journal = {Fish and Fisheries},
language = {English},
number = {5}
}
@article{tangDualAttentionGANs2020,
title = {Dual {{Attention GANs}} for {{Semantic Image Synthesis}}},
author = {Tang, Hao and Bai, Song and Sebe, Nicu},
year = {2020},
month = aug,
abstract = {In this paper, we focus on the semantic image synthesis task that aims at transferring semantic label maps to photo-realistic images. Existing methods lack effective semantic constraints to preserve the semantic information and ignore the structural correlations in both spatial and channel dimensions, leading to unsatisfactory blurry and artifact-prone results. To address these limitations, we propose a novel Dual Attention GAN (DAGAN) to synthesize photo-realistic and semantically-consistent images with fine details from the input layouts without imposing extra training overhead or modifying the network architectures of existing methods. We also propose two novel modules, i.e., position-wise Spatial Attention Module (SAM) and scale-wise Channel Attention Module (CAM), to capture semantic structure attention in spatial and channel dimensions, respectively. Specifically, SAM selectively correlates the pixels at each position by a spatial attention map, leading to pixels with the same semantic label being related to each other regardless of their spatial distances. Meanwhile, CAM selectively emphasizes the scalewise features at each channel by a channel attention map, which integrates associated features among all channel maps regardless of their scales. We finally sum the outputs of SAM and CAM to further improve feature representation. Extensive experiments on four challenging datasets show that DAGAN achieves remarkably better results than state-of-the-art methods, while using fewer model parameters. The source code and trained models are available at https://github.com/Ha0Tang/DAGAN.},
archiveprefix = {arXiv},
eprint = {2008.13024},
eprinttype = {arxiv},
journal = {arXiv:2008.13024 [cs]},
keywords = {Computer Science - Artificial Intelligence,Computer Science - Computer Vision and Pattern Recognition,Computer Science - Machine Learning,Computer Science - Multimedia},
language = {English},
primaryclass = {cs}
}
@article{tangEdgeGuidedGANs2020,
title = {Edge {{Guided GANs}} with {{Semantic Preserving}} for {{Semantic Image Synthesis}}},
author = {Tang, Hao and Qi, Xiaojuan and Xu, Dan and Torr, Philip H. S. and Sebe, Nicu},
year = {2020},
month = mar,
abstract = {We propose a novel Edge guided Generative Adversarial Network (EdgeGAN) for photo-realistic image synthesis from semantic layouts. Although considerable improvement has been achieved, the quality of synthesized images is far from satisfactory due to two largely unresolved challenges. First, the semantic labels do not provide detailed structural information, making it difficult to synthesize local details and structures. Second, the widely adopted CNN operations such as convolution, down-sampling and normalization usually cause spatial resolution loss and thus are unable to fully preserve the original semantic information, leading to semantically inconsistent results (e.g., missing small objects). To tackle the first challenge, we propose to use edge as an intermediate representation which is further adopted to guide image generation via a proposed attention guided edge transfer module. Edge information is produced by a convolutional generator and introduces detailed structure information. Further, to preserve the semantic information, we design an effective module to selectively highlight class-dependent feature maps according to the original semantic layout. Extensive experiments on two challenging datasets show that the proposed EdgeGAN can generate significantly better results than state-of-the-art methods. The source code and trained models are available at https://github.com/Ha0Tang/EdgeGAN.},
archiveprefix = {arXiv},
eprint = {2003.13898},
eprinttype = {arxiv},
journal = {arXiv:2003.13898 [cs, eess]},
keywords = {Computer Science - Computer Vision and Pattern Recognition,Computer Science - Machine Learning,Electrical Engineering and Systems Science - Image and Video Processing},
language = {English},
primaryclass = {cs, eess}
}
@article{tanSemanticImageSynthesis2020,
title = {Semantic {{Image Synthesis}} via {{Efficient Class}}-{{Adaptive Normalization}}},
author = {Tan, Zhentao and Chen, Dongdong and Chu, Qi and Chai, Menglei and Liao, Jing and He, Mingming and Yuan, Lu and Hua, Gang and Yu, Nenghai},
year = {2020},
month = dec,
abstract = {Spatially-adaptive normalization (SPADE) is remarkably successful recently in conditional semantic image synthesis, which modulates the normalized activation with spatially-varying transformations learned from semantic layouts, to prevent the semantic information from being washed away. Despite its impressive performance, a more thorough understanding of the advantages inside the box is still highly demanded to help reduce the significant computation and parameter overhead introduced by this novel structure. In this paper, from a return-on-investment point of view, we conduct an in-depth analysis of the effectiveness of this spatially-adaptive normalization and observe that its modulation parameters benefit more from semantic-awareness rather than spatial-adaptiveness, especially for high-resolution input masks. Inspired by this observation, we propose class-adaptive normalization (CLADE), a lightweight but equally-effective variant that is only adaptive to semantic class. In order to further improve spatial-adaptiveness, we introduce intra-class positional map encoding calculated from semantic layouts to modulate the normalization parameters of CLADE and propose a truly spatially-adaptive variant of CLADE, namely CLADE-ICPE. Benefiting from this design, CLADE greatly reduces the computation cost while being able to preserve the semantic information in the generation. Through extensive experiments on multiple challenging datasets, we demonstrate that the proposed CLADE can be generalized to different SPADE-based methods while achieving comparable generation quality compared to SPADE, but it is much more efficient with fewer extra parameters and lower computational cost. The code is available at https://github.com/tzt101/CLADE.git},
archiveprefix = {arXiv},
eprint = {2012.04644},
eprinttype = {arxiv},
journal = {arXiv:2012.04644 [cs]},
keywords = {Computer Science - Computer Vision and Pattern Recognition,Computer Science - Graphics},
language = {English},
primaryclass = {cs}
}
@article{thompsonMortalityAssociatedInfluenza2003,
title = {Mortality {{Associated With Influenza}} and {{Respiratory Syncytial Virus}} in the {{United States}}},
author = {Thompson, William W. and Shay, David K. and Weintraub, Eric and Brammer, Lynnette and Cox, Nancy and Anderson, Larry J. and Fukuda, Keiji},
year = {2003},
month = oct,
pages = {8},
abstract = {Objective: To develop a statistical model using national mortality and viral surveillance data to estimate annual influenza- and RSV-associated deaths in the United States, by age group, virus, and influenza type and subtype. Design, Setting, and Population: Age-specific Poisson regression models using national viral surveillance data for the 1976-1977 through 1998-1999 seasons were used to estimate influenza-associated deaths. Influenza- and RSV-associated deaths were simultaneously estimated for the 1990-1991 through 1998-1999 seasons. Main Outcome Measures: Attributable deaths for 3 categories: underlying pneumonia and influenza, underlying respiratory and circulatory, and all causes. Results: Annual estimates of influenza-associated deaths increased significantly between the 1976-1977 and 1998-1999 seasons for all 3 death categories (P{$<$}.001 for each category). For the 1990-1991 through 1998-1999 seasons, the greatest mean numbers of deaths were associated with influenza A(H3N2) viruses, followed by RSV, influenza B, and influenza A(H1N1). Influenza viruses and RSV, respectively, were associated with annual means (SD) of 8097 (3084) and 2707 (196) underlying pneumonia and influenza deaths, 36155 (11055) and 11321 (668) underlying respiratory and circulatory deaths, and 51203 (15081) and 17358 (1086) all-cause deaths. For underlying respiratory and circulatory deaths, 90\% of influenza- and 78\% of RSV-associated deaths occurred among persons aged 65 years or older. Influenza was associated with more deaths than RSV in all age groups except for children younger than 1 year. On average, influenza was associated with 3 times as many deaths as RSV. Conclusions: Mortality associated with both influenza and RSV circulation disproportionately affects elderly persons. Influenza deaths have increased substantially in the last 2 decades, in part because of aging of the population, underscoring the need for better prevention measures, including more effective vaccines and vaccination programs for elderly persons.},
language = {English}
}
@article{tomicFluPRINTDatasetMultidimensional2019,
title = {The {{FluPRINT}} Dataset, a Multidimensional Analysis of the Influenza Vaccine Imprint on the Immune System},
author = {Tomic, Adriana and Tomic, Ivan and Dekker, Cornelia L. and Maecker, Holden T. and Davis, Mark M.},
year = {2019},
month = oct,
volume = {6},
pages = {214},
publisher = {{Nature Publishing Group}},
issn = {2052-4463},
doi = {10.1038/s41597-019-0213-4},
abstract = {Machine learning has the potential to identify novel biological factors underlying successful antibody responses to influenza vaccines. The first attempts have revealed a high level of complexity in establishing influenza immunity, and many different cellular and molecular components are involved. Of note is that the previously identified correlates of protection fail to account for the majority of individual responses across different age groups and influenza seasons. Challenges remain from the small sample sizes in most studies and from often limited data sets, such as transcriptomic data. Here we report the creation of a unified database, FluPRINT, to enable large-scale studies exploring the cellular and molecular underpinnings of successful antibody responses to influenza vaccines. Over 3,000 parameters were considered, including serological responses to influenza strains, serum cytokines, cell phenotypes, and cytokine stimulations. FluPRINT, facilitates the application of machine learning algorithms for data mining. The data are publicly available and represent a resource to uncover new markers and mechanisms that are important for influenza vaccine immunogenicity.},
copyright = {2019 The Author(s)},
journal = {Scientific Data},
language = {English},
number = {1}
}
@article{tomicSIMONAutomatedMachine2019,
title = {{{SIMON}}, an Automated Machine Learning System Reveals Immune Signatures of Influenza Vaccine Responses},
author = {Tomic, Adriana and Tomic, Ivan and {Rosenberg-Hasson}, Yael and Dekker, Cornelia L. and Maecker, Holden T. and Davis, Mark M.},
year = {2019},
month = feb,
pages = {545186},
publisher = {{Cold Spring Harbor Laboratory}},
doi = {10.1101/545186},
abstract = {Machine learning holds considerable promise for understanding complex biological processes such as vaccine responses. Capturing interindividual variability is essential to increase the statistical power necessary for building more accurate predictive models. However, available approaches have difficulty coping with incomplete datasets which is often the case when combining studies. Additionally, there are hundreds of algorithms available and no simple way to find the optimal one. Here, we developed Sequential Iterative Modelling ``OverNight'' or SIMON, an automated machine learning system that compares results from 128 different algorithms and is particularly suitable for datasets containing many missing values. We applied SIMON to data from five clinical studies of seasonal influenza vaccination. The results reveal previously unrecognized CD4{$^+$} and CD8{$^+$} T cell subsets strongly associated with a robust antibody response to influenza antigens. These results demonstrate that SIMON can greatly speed up the choice of analysis modalities. Hence, it is a highly useful approach for data-driven hypothesis generation from disparate clinical datasets. Our strategy could be used to gain biological insight from ever-expanding heterogeneous datasets that are publicly available.},
chapter = {New Results},
copyright = {\textcopyright{} 2019, Posted by Cold Spring Harbor Laboratory. This pre-print is available under a Creative Commons License (Attribution-NonCommercial-NoDerivs 4.0 International), CC BY-NC-ND 4.0, as described at http://creativecommons.org/licenses/by-nc-nd/4.0/},
journal = {bioRxiv},
language = {English}
}
@article{trieuLongtermMaintenanceInfluenzaSpecific2017,
title = {Long-Term {{Maintenance}} of the {{Influenza}}-{{Specific Cross}}-{{Reactive Memory CD4}}+ {{T}}-{{Cell Responses Following Repeated Annual Influenza Vaccination}}},
author = {Trieu, Mai-Chi and Zhou, Fan and Lartey, Sarah and {Jul-Larsen}, {\AA}sne and Mjaaland, Siri and Sridhar, Saranya and Cox, Rebecca Jane},
year = {2017},
month = mar,
volume = {215},
pages = {740--749},
publisher = {{Oxford Academic}},
issn = {0022-1899},
doi = {10.1093/infdis/jiw619},
abstract = {Background: Annual vaccination for healthcare workers and other high-risk groups is the mainstay of the public health strategy to combat influenza.},
journal = {The Journal of Infectious Diseases},
language = {English},
number = {5}
}
@article{tsangGlobalAnalysesHuman2014,
title = {Global {{Analyses}} of {{Human Immune Variation Reveal Baseline Predictors}} of {{Postvaccination Responses}}},
author = {Tsang, John S. and Schwartzberg, Pamela L. and Kotliarov, Yuri and Biancotto, Angelique and Xie, Zhi and Germain, Ronald N. and Wang, Ena and Olnes, Matthew J. and Narayanan, Manikandan and Golding, Hana and Moir, Susan and Dickler, Howard B. and Perl, Shira and Cheung, Foo and Obermoser, Gerlinde and Chaussabel, Damien and Palucka, Karolina and Chen, Jinguo and Fuchs, J. Christopher and Ho, Jason and Khurana, Surender and King, Lisa R. and Langweiler, Marc and Liu, Hui and Manischewitz, Jody and Pos, Zoltan and Posada, Jacqueline G. and Schum, Paula and Shi, Rongye and Valdez, Janet and Wang, Wei and Zhou, Huizhi and Kastner, Daniel L. and Marincola, Francesco M. and McCoy, J. Philip and Trinchieri, Giorgio and Young, Neal S.},
year = {2014},
month = apr,
volume = {157},
pages = {499--513},
issn = {0092-8674},
doi = {10.1016/j.cell.2014.03.031},
abstract = {A major goal of systems biology is the development of models that accurately predict responses to perturbation. Constructing such models requires the collection of dense measurements of system states, yet transformation of data into predictive constructs remains a challenge. To begin to model human immunity, we analyzed immune parameters in depth both at baseline and in response to influenza vaccination. Peripheral blood mononuclear cell transcriptomes, serum titers, cell subpopulation frequencies, and B cell responses were assessed in 63 individuals before and after vaccination and were used to develop a systematic framework to dissect inter- and intra-individual variation and build predictive models of postvaccination antibody responses. Strikingly, independent of age and pre-existing antibody titers, accurate models could be constructed using pre-perturbation cell populations alone, which were validated using independent baseline time points. Most of the parameters contributing to prediction delineated temporally stable baseline differences across individuals, raising the prospect of immune monitoring before intervention.},
journal = {Cell},
language = {English},
number = {2}
}
@article{ulyanovInstanceNormalizationMissing2017,
title = {Instance {{Normalization}}: {{The Missing Ingredient}} for {{Fast Stylization}}},
shorttitle = {Instance {{Normalization}}},
author = {Ulyanov, Dmitry and Vedaldi, Andrea and Lempitsky, Victor},
year = {2017},
month = nov,
abstract = {In this paper we revisit the fast stylization method introduced in Ulyanov et al. (2016). We show how a small change in the stylization architecture results in a significant qualitative improvement in the generated images. The change is limited to swapping batch normalization with instance normalization, and to apply the latter both at training and testing times. The resulting method can be used to train high-performance architectures for real-time image generation. The code is available at https://github.com/DmitryUlyanov/texture\_nets. Full paper can be found at https://arxiv.org/abs/1701.02096.},
archiveprefix = {arXiv},
eprint = {1607.08022},
eprinttype = {arxiv},
journal = {arXiv:1607.08022 [cs]},
keywords = {Computer Science - Computer Vision and Pattern Recognition},
language = {English},
primaryclass = {cs}
}
@article{vandijkInvestigatingPrivacyTheory,
title = {Investigating {{Privacy Theory}} through {{Network Analysis}}},
author = {{van Dijk}, Friso},
pages = {30},
language = {English}
}
@article{wangGenerativeImageModeling2016,
title = {Generative {{Image Modeling}} Using {{Style}} and {{Structure Adversarial Networks}}},
author = {Wang, Xiaolong and Gupta, Abhinav},
year = {2016},
month = jul,
abstract = {Current generative frameworks use end-to-end learning and generate images by sampling from uniform noise distribution. However, these approaches ignore the most basic principle of image formation: images are product of: (a) Structure: the underlying 3D model; (b) Style: the texture mapped onto structure. In this paper, we factorize the image generation process and propose Style and Structure Generative Adversarial Network (S2-GAN). Our S2-GAN has two components: the StructureGAN generates a surface normal map; the Style-GAN takes the surface normal map as input and generates the 2D image. Apart from a real vs. generated loss function, we use an additional loss with computed surface normals from generated images. The two GANs are first trained independently, and then merged together via joint learning. We show our S2-GAN model is interpretable, generates more realistic images and can be used to learn unsupervised RGBD representations.},
archiveprefix = {arXiv},
eprint = {1603.05631},
eprinttype = {arxiv},
journal = {arXiv:1603.05631 [cs]},
keywords = {Computer Science - Computer Vision and Pattern Recognition},
language = {English},
primaryclass = {cs}
}
@article{wangHighResolutionImageSynthesis2018,
title = {High-{{Resolution Image Synthesis}} and {{Semantic Manipulation}} with {{Conditional GANs}}},
author = {Wang, Ting-Chun and Liu, Ming-Yu and Zhu, Jun-Yan and Tao, Andrew and Kautz, Jan and Catanzaro, Bryan},
year = {2018},
month = aug,
abstract = {We present a new method for synthesizing high-resolution photo-realistic images from semantic label maps using conditional generative adversarial networks (conditional GANs). Conditional GANs have enabled a variety of applications, but the results are often limited to low-resolution and still far from realistic. In this work, we generate 2048x1024 visually appealing results with a novel adversarial loss, as well as new multi-scale generator and discriminator architectures. Furthermore, we extend our framework to interactive visual manipulation with two additional features. First, we incorporate object instance segmentation information, which enables object manipulations such as removing/adding objects and changing the object category. Second, we propose a method to generate diverse results given the same input, allowing users to edit the object appearance interactively. Human opinion studies demonstrate that our method significantly outperforms existing methods, advancing both the quality and the resolution of deep image synthesis and editing.},
archiveprefix = {arXiv},
eprint = {1711.11585},
eprinttype = {arxiv},
journal = {arXiv:1711.11585 [cs]},
keywords = {Computer Science - Computer Vision and Pattern Recognition,Computer Science - Graphics,Computer Science - Machine Learning},
language = {English},
primaryclass = {cs}
}
@article{wangImageQualityAssessment2004,
title = {Image {{Quality Assessment}}: {{From Error Visibility}} to {{Structural Similarity}}},
author = {Wang, Zhou and Bovik, Alan Conrad and Sheikh, Hamid Rahim and Simoncelli, Eero P.},
year = {2004},
volume = {13},
pages = {13},
journal = {IEEE Transactions on Image Processing},
language = {English},
number = {4}
}
@book{wickhamDataScienceImport2017,
title = {R for {{Data Science}}: {{Import}}, {{Tidy}}, {{Transform}}, {{Visualize}}, and {{Model Data}}},
shorttitle = {R for {{Data Science}}},
author = {Wickham, Hadley and Grolemund, Garrett},
year = {2017},
edition = {1st},
publisher = {{O'Reilly Media, Inc.}},
abstract = {Learn how to use R to turn raw data into insight, knowledge, and understanding. This book introduces you to R, RStudio, and the tidyverse, a collection of R packages designed to work together to make data science fast, fluent, and fun. Suitable for readers with no previous programming experience, R for Data Science is designed to get you doing data science as quickly as possible. Authors Hadley Wickham and Garrett Grolemund guide you through the steps of importing, wrangling, exploring, and modeling your data and communicating the results. You'll get a complete, big-picture understanding of the data science cycle, along with basic tools you need to manage the details. Each section of the book is paired with exercises to help you practice what you've learned along the way. You'll learn how to: Wrangle: transform your datasets into a form convenient for analysis; Program: learn powerful R tools for solving data problems with greater clarity and ease; Explore: examine your data, generate hypotheses, and quickly test them; Model: provide a low-dimensional summary that captures true "signals" in your dataset; Communicate: learn R Markdown for integrating prose, code, and results},
isbn = {978-1-4919-1039-9}
}
@article{wuImageLabelingMarkov2019,
title = {Image {{Labeling}} with {{Markov Random Fields}} and {{Conditional Random Fields}}},
author = {Wu, Shangxuan and Weng, Xinshuo},
year = {2019},
month = may,
abstract = {Most existing methods for object segmentation in computer vision are formulated as a labeling task. This, in general, could be transferred to a pixel-wise label assignment task, which is quite similar to the structure of hidden Markov random field. In terms of Markov random field, each pixel can be regarded as a state and has a transition probability to its neighbor pixel, the label behind each pixel is a latent variable and has an emission probability from its corresponding state. In this paper, we reviewed several modern image labeling methods based on Markov random field and conditional random Field. And we compare the result of these methods with some classical image labeling methods. The experiment demonstrates that the introduction of Markov random field and conditional random field make a big difference in the segmentation result.},
archiveprefix = {arXiv},
eprint = {1811.11323},
eprinttype = {arxiv},
journal = {arXiv:1811.11323 [cs]},
keywords = {Computer Science - Computer Vision and Pattern Recognition},
language = {English},
primaryclass = {cs}
}
@article{yumakLecture10Convolutional,
title = {Lecture 10: {{Convolutional Neural Networks}}},
author = {Yumak, Zerrin},
pages = {100},
language = {English}
}
@article{yumakLecture11Recurrent,
title = {Lecture 11: {{Recurrent Neural Networks}}},
author = {Yumak, Zerrin},
pages = {113},
language = {English}
}
@article{zhangContextEncodingSemantic2018,
title = {Context {{Encoding}} for {{Semantic Segmentation}}},
author = {Zhang, Hang and Dana, Kristin and Shi, Jianping and Zhang, Zhongyue and Wang, Xiaogang and Tyagi, Ambrish and Agrawal, Amit},
year = {2018},
month = mar,
abstract = {Recent work has made significant progress in improving spatial resolution for pixelwise labeling with Fully Convolutional Network (FCN) framework by employing Dilated/Atrous convolution, utilizing multi-scale features and refining boundaries. In this paper, we explore the impact of global contextual information in semantic segmentation by introducing the Context Encoding Module, which captures the semantic context of scenes and selectively highlights class-dependent feature maps. The proposed Context Encoding Module significantly improves semantic segmentation results with only marginal extra computation cost over FCN. Our approach has achieved new state-of-the-art results 51.7\% mIoU on PASCAL-Context, 85.9\% mIoU on PASCAL VOC 2012. Our single model achieves a final score of 0.5567 on ADE20K test set, which surpasses the winning entry of COCO-Place Challenge 2017. In addition, we also explore how the Context Encoding Module can improve the feature representation of relatively shallow networks for the image classification on CIFAR-10 dataset. Our 14 layer network has achieved an error rate of 3.45\%, which is comparable with state-of-the-art approaches with over 10\texttimes{} more layers. The source code for the complete system are publicly available.},
archiveprefix = {arXiv},
eprint = {1803.08904},
eprinttype = {arxiv},
journal = {arXiv:1803.08904 [cs]},
keywords = {Computer Science - Computer Vision and Pattern Recognition},
language = {English},
primaryclass = {cs}
}
@article{zhangContextEncodingSemantic2018a,
title = {Context {{Encoding}} for {{Semantic Segmentation}}},
author = {Zhang, Hang and Dana, Kristin and Shi, Jianping and Zhang, Zhongyue and Wang, Xiaogang and Tyagi, Ambrish and Agrawal, Amit},
year = {2018},
month = mar,
abstract = {Recent work has made significant progress in improving spatial resolution for pixelwise labeling with Fully Convolutional Network (FCN) framework by employing Dilated/Atrous convolution, utilizing multi-scale features and refining boundaries. In this paper, we explore the impact of global contextual information in semantic segmentation by introducing the Context Encoding Module, which captures the semantic context of scenes and selectively highlights class-dependent featuremaps. The proposed Context Encoding Module significantly improves semantic segmentation results with only marginal extra computation cost over FCN. Our approach has achieved new state-of-theart results 51.7\% mIoU on PASCAL-Context, 85.9\% mIoU on PASCAL VOC 2012. Our single model achieves a final score of 0.5567 on ADE20K test set, which surpasses the winning entry of COCO-Place Challenge 2017. In addition, we also explore how the Context Encoding Module can improve the feature representation of relatively shallow networks for the image classification on CIFAR-10 dataset. Our 14 layer network has achieved an error rate of 3.45\%, which is comparable with state-of-the-art approaches with over 10\texttimes{} more layers. The source code for the complete system are publicly available1.},
archiveprefix = {arXiv},
eprint = {1803.08904},
eprinttype = {arxiv},
journal = {arXiv:1803.08904 [cs]},
keywords = {Computer Science - Computer Vision and Pattern Recognition},
language = {English},
primaryclass = {cs}
}
@article{zhangOverviewOnlineFake2020,
title = {An {{Overview}} of {{Online Fake News}}: {{Characterization}}, {{Detection}}, and {{Discussion}}},
author = {Zhang, Xichen and Ghorbani, Ali A.},
year = {2020},
volume = {57},
pages = {102025},
issn = {0306-4573},
doi = {10.1016/j.ipm.2019.03.004},
abstract = {Over the recent years, the growth of online social media has greatly facilitated the way people communicate with each other. Users of online social media share information, connect with other people and stay informed about trending events. However, much recent information appearing on social media is dubious and, in some cases, intended to mislead. Such content is often called fake news. Large amounts of online fake news has the potential to cause serious problems in society. Many point to the 2016 U.S. presidential election campaign as having been influenced by fake news. Subsequent to this election, the term has entered the mainstream vernacular. Moreover it has drawn the attention of industry and academia, seeking to understand its origins, distribution and effects. Of critical interest is the ability to detect when online content is untrue and intended to mislead. This is technically challenging for several reasons. Using social media tools, content is easily generated and quickly spread, leading to a large volume of content to analyse. Online information is very diverse, covering a large number of subjects, which contributes complexity to this task. The truth and intent of any statement often cannot be assessed by computers alone, so efforts must depend on collaboration between humans and technology. For instance, some content that is deemed by experts of being false and intended to mislead are available. While these sources are in limited supply, they can form a basis for such a shared effort. In this survey, we present a comprehensive overview of the finding to date relating to fake news. We characterize the negative impact of online fake news, and the state-of-the-art in detection methods. Many of these rely on identifying features of the users, content, and context that indicate misinformation. We also study existing datasets that have been used for classifying fake news. Finally, we propose promising research directions for online fake news analysis.},
journal = {Information Processing \& Management},
keywords = {Fake news detection,Online fake news,Social media},
number = {2}
}
@article{zhangSelfAttentionGenerativeAdversarial2019,
title = {Self-{{Attention Generative Adversarial Networks}}},
author = {Zhang, Han and Goodfellow, Ian and Metaxas, Dimitris and Odena, Augustus},
year = {2019},
month = jun,
abstract = {In this paper, we propose the Self-Attention Generative Adversarial Network (SAGAN) which allows attention-driven, long-range dependency modeling for image generation tasks. Traditional convolutional GANs generate high-resolution details as a function of only spatially local points in lower-resolution feature maps. In SAGAN, details can be generated using cues from all feature locations. Moreover, the discriminator can check that highly detailed features in distant portions of the image are consistent with each other. Furthermore, recent work has shown that generator conditioning affects GAN performance. Leveraging this insight, we apply spectral normalization to the GAN generator and find that this improves training dynamics. The proposed SAGAN performs better than prior work, boosting the best published Inception score from 36.8 to 52.52 and reducing Fr\'echet Inception distance from 27.62 to 18.65 on the challenging ImageNet dataset. Visualization of the attention layers shows that the generator leverages neighborhoods that correspond to object shapes rather than local regions of fixed shape.},
archiveprefix = {arXiv},
eprint = {1805.08318},
eprinttype = {arxiv},
journal = {arXiv:1805.08318 [cs, stat]},
keywords = {Computer Science - Machine Learning,Statistics - Machine Learning},
language = {English},
primaryclass = {cs, stat}
}
@article{zhangStackGANTextPhotorealistic2017,
title = {{{StackGAN}}: {{Text}} to {{Photo}}-{{Realistic Image Synthesis}} with {{Stacked Generative Adversarial Networks}}},
shorttitle = {{{StackGAN}}},
author = {Zhang, Han and Xu, Tao and Li, Hongsheng and Zhang, Shaoting and Wang, Xiaogang and Huang, Xiaolei and Metaxas, Dimitris},
year = {2017},
month = aug,
abstract = {Synthesizing high-quality images from text descriptions is a challenging problem in computer vision and has many practical applications. Samples generated by existing text-to-image approaches can roughly reflect the meaning of the given descriptions, but they fail to contain necessary details and vivid object parts. In this paper, we propose Stacked Generative Adversarial Networks (StackGAN) to generate 256\texttimes 256 photo-realistic images conditioned on text descriptions. We decompose the hard problem into more manageable sub-problems through a sketch-refinement process. The Stage-I GAN sketches the primitive shape and colors of the object based on the given text description, yielding Stage-I low-resolution images. The Stage-II GAN takes Stage-I results and text descriptions as inputs, and generates high-resolution images with photo-realistic details. It is able to rectify defects in Stage-I results and add compelling details with the refinement process. To improve the diversity of the synthesized images and stabilize the training of the conditional-GAN, we introduce a novel Conditioning Augmentation technique that encourages smoothness in the latent conditioning manifold. Extensive experiments and comparisons with state-of-the-arts on benchmark datasets demonstrate that the proposed method achieves significant improvements on generating photo-realistic images conditioned on text descriptions.},
archiveprefix = {arXiv},
eprint = {1612.03242},
eprinttype = {arxiv},
journal = {arXiv:1612.03242 [cs, stat]},
keywords = {Computer Science - Artificial Intelligence,Computer Science - Computer Vision and Pattern Recognition,Statistics - Machine Learning},
language = {English},
primaryclass = {cs, stat}
}
@article{zhouHospitalizationsAssociatedInfluenza2012,
title = {Hospitalizations {{Associated With Influenza}} and {{Respiratory Syncytial Virus}} in the {{United States}}, 1993\textendash 2008},
author = {Zhou, Hong and Thompson, William W. and Viboud, Cecile G. and Ringholz, Corinne M. and Cheng, Po-Yung and Steiner, Claudia and Abedi, Glen R. and Anderson, Larry J. and Brammer, Lynnette and Shay, David K.},
year = {2012},
month = may,
volume = {54},
pages = {1427--1436},
issn = {1058-4838},
doi = {10.1093/cid/cis211},
abstract = {Background.\hspace{1em}Age-specific comparisons of influenza and respiratory syncytial virus (RSV) hospitalization rates can inform prevention efforts, including vaccine development plans. Previous US studies have not estimated jointly the burden of these viruses using similar data sources and over many seasons.Methods.\hspace{1em}We estimated influenza and RSV hospitalizations in 5 age categories (\<1, 1\textendash 4, 5\textendash 49, 50\textendash 64, and {$\geq$}65 years) with data for 13 states from 1993\textendash 1994 through 2007\textendash 2008. For each state and age group, we estimated the contribution of influenza and RSV to hospitalizations for respiratory and circulatory disease by using negative binomial regression models that incorporated weekly influenza and RSV surveillance data as covariates.Results.\hspace{1em}Mean rates of influenza and RSV hospitalizations were 63.5 (95\% confidence interval [CI], 37.5\textendash 237) and 55.3 (95\% CI, 44.4\textendash 107) per 100000 person-years, respectively. The highest hospitalization rates for influenza were among persons aged {$\geq$}65 years (309/100000; 95\% CI, 186\textendash 1100) and those aged \<1 year (151/100000; 95\% CI, 151\textendash 660). For RSV, children aged \<1 year had the highest hospitalization rate (2350/100000; 95\% CI, 2220\textendash 2520) followed by those aged 1\textendash 4 years (178/100000; 95\% CI, 155\textendash 230). Age-standardized annual rates per 100000 person-years varied substantially for influenza (33\textendash 100) but less for RSV (42\textendash 77).Conclusions.\hspace{1em}Overall US hospitalization rates for influenza and RSV are similar; however, their age-specific burdens differ dramatically. Our estimates are consistent with those from previous studies focusing either on influenza or RSV. Our approach provides robust national comparisons of hospitalizations associated with these 2 viral respiratory pathogens by age group and over time.},
journal = {Clinical Infectious Diseases},
number = {10}
}
@article{zhuSEANImageSynthesis2020,
title = {{{SEAN}}: {{Image Synthesis}} with {{Semantic Region}}-{{Adaptive Normalization}}},
shorttitle = {{{SEAN}}},
author = {Zhu, Peihao and Abdal, Rameen and Qin, Yipeng and Wonka, Peter},
year = {2020},
month = jun,
pages = {5103--5112},
doi = {10.1109/CVPR42600.2020.00515},
abstract = {We propose semantic region-adaptive normalization (SEAN), a simple but effective building block for Generative Adversarial Networks conditioned on segmentation masks that describe the semantic regions in the desired output image. Using SEAN normalization, we can build a network architecture that can control the style of each semantic region individually, e.g., we can specify one style reference image per region. SEAN is better suited to encode, transfer, and synthesize style than the best previous method in terms of reconstruction quality, variability, and visual quality. We evaluate SEAN on multiple datasets and report better quantitative metrics (e.g. FID, PSNR) than the current state of the art. SEAN also pushes the frontier of interactive image editing. We can interactively edit images by changing segmentation masks or the style for any given region. We can also interpolate styles from two reference images per region.},
archiveprefix = {arXiv},
eprint = {1911.12861},
eprinttype = {arxiv},
journal = {2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
keywords = {Computer Science - Computer Vision and Pattern Recognition,Computer Science - Graphics,Electrical Engineering and Systems Science - Image and Video Processing},
language = {English}
}
@article{zhuSemanticallyMultiModalImage2020,
title = {Semantically {{Multi}}-{{Modal Image Synthesis}}},
author = {Zhu, Zhen and Xu, Zhiliang and You, Ansheng and Bai, Xiang},
year = {2020},
month = apr,
abstract = {In this paper, we focus on semantically multi-modal image synthesis (SMIS) task, namely, generating multi-modal images at the semantic level. Previous work seeks to use multiple class-specific generators, constraining its usage in datasets with a small number of classes. We instead propose a novel Group Decreasing Network (GroupDNet) that leverages group convolutions in the generator and progressively decreases the group numbers of the convolutions in the decoder. Consequently, GroupDNet is armed with much more controllability on translating semantic labels to natural images and has plausible high-quality yields for datasets with many classes. Experiments on several challenging datasets demonstrate the superiority of GroupDNet on performing the SMIS task. We also show that GroupDNet is capable of performing a wide range of interesting synthesis applications. Codes and models are available at: https://github.com/Seanseattle/SMIS.},
archiveprefix = {arXiv},
eprint = {2003.12697},
eprinttype = {arxiv},
journal = {arXiv:2003.12697 [cs]},
keywords = {Computer Science - Computer Vision and Pattern Recognition},
language = {English},
primaryclass = {cs}
}
@article{zimmermannPredictingDefectsEclipse2007,
title = {Predicting {{Defects}} for {{Eclipse}}},
author = {Zimmermann, Thomas and Premraj, Rahul and Zeller, Andreas},
year = {2007},
month = may,
publisher = {{IEEE}},
doi = {10.1109/promise.2007.10},
isbn = {0769529542},
journal = {Third International Workshop on Predictor Models in Software Engineering (PROMISE'07: ICSE Workshops 2007)}
}
@article{L_demann_2006,
title = {Glioma assessment using quantitative blood volume maps generated by T1-weighted dynamic contrast-enhanced magnetic resonance imaging: a receiver operating characteristic study},
author = {Lüdemann, L. and Grieger, W. and Wurm, R. and Wust, P. and Zimmer, C.},
year = {2006},
month = apr,
publisher = {{SAGE Publications}},
volume = {47},
pages = {303--310},
doi = {10.1080/02841850500539033},
journal = {Acta Radiologica},
number = {3}
}
@article{Zhang_2019,
title = {p-{STAT}1 regulates the influenza A virus replication and inflammatory response in vitro and vivo},
author = {Zhang, Shouping and Huo, Caiyun and Xiao, Jin and Fan, Tao and Zou, Shumei and Qi, Peng and Sun, Lunquan and Wang, Ming and Hu, Yanxin},
year = {2019},
month = nov,
publisher = {{Elsevier BV}},
volume = {537},
pages = {110--120},
doi = {10.1016/j.virol.2019.08.023},
journal = {Virology}
}
@article{Zhang_2018,
title = {Protective effects of diketopiperazines from Moslae Herba against influenza A virus-induced pulmonary inflammation via inhibition of viral replication and platelets aggregation},
author = {Zhang, Huan-Huan and Yu, Wen-Ying and Li, Lan and Wu, Fang and Chen, Qin and Yang, Yang and Yu, Chen-Huan},
year = {2018},
month = apr,
publisher = {{Elsevier BV}},
volume = {215},
pages = {156--166},
doi = {10.1016/j.jep.2018.01.005},
journal = {Journal of Ethnopharmacology}
}
@article{Papin_2004,
title = {The {JAK}-{STAT} Signaling Network in the Human B-Cell: An Extreme Signaling Pathway Analysis},
author = {Papin, Jason A. and Palsson, Bernhard O.},
year = {2004},
month = jul,
publisher = {{Elsevier BV}},
volume = {87},
pages = {37--46},
doi = {10.1529/biophysj.103.029884},
journal = {Biophysical Journal},
number = {1}
}
@article{Cantrell_2015,
title = {Signaling in Lymphocyte Activation},
author = {Cantrell, Doreen},
year = {2015},
month = jun,
publisher = {{Cold Spring Harbor Laboratory}},
volume = {7},
pages = {a018788},
doi = {10.1101/cshperspect.a018788},
journal = {Cold Spring Harbor Perspectives in Biology},
number = {6}
}
@article{Toapanta_2012,
title = {Diverse phosphorylation patterns of B cell receptor-associated signaling in naïve and memory human B cells revealed by phosphoflow, a powerful technique to study signaling at the single cell level},
author = {Toapanta, Franklin R. and Bernal, Paula J. and Sztein, Marcelo B.},
year = {2012},
publisher = {{Frontiers Media SA}},
volume = {2},
doi = {10.3389/fcimb.2012.00128},
journal = {Frontiers in Cellular and Infection Microbiology}
}
@article{van_den_Berg_2019,
title = {Effect of latent cytomegalovirus infection on the antibody response to influenza vaccination: a systematic review and meta-analysis},
author = {{van den Berg}, S. P. H. and Warmink, K. and Borghans, J. A. M. and Knol, M. J. and {van Baarle}, D.},
year = {2019},
month = apr,
publisher = {{Springer Science and Business Media LLC}},
volume = {208},
pages = {305--321},
doi = {10.1007/s00430-019-00602-z},
journal = {Medical Microbiology and Immunology},
number = {3-4}
}