@article{726791,
  author  = {LeCun, Y. and Bottou, L. and Bengio, Y. and Haffner, P.},
  journal = {Proceedings of the IEEE},
  title   = {Gradient-based learning applied to document recognition},
  year    = {1998},
  volume  = {86},
  number  = {11},
  pages   = {2278--2324},
  doi     = {10.1109/5.726791}
}

@article{DBLP:journals/corr/SzeCESZ16,
  author    = {Vivienne Sze and
               Yu{-}Hsin Chen and
               Joel S. Emer and
               Amr Suleiman and
               Zhengdong Zhang},
  title     = {Hardware for Machine Learning: Challenges and Opportunities},
  journal   = {CoRR},
  volume    = {abs/1612.07625},
  year      = {2016},
  url       = {http://arxiv.org/abs/1612.07625},
  eprinttype = {arXiv},
  eprint    = {1612.07625},
}

@article{DBLP:journals/corr/SuleimanZS16,
  author    = {Amr Suleiman and
               Zhengdong Zhang and
               Vivienne Sze},
  title     = {A 58.6mW Real-Time Programmable Object Detector with Multi-Scale Multi-Object
               Support Using Deformable Parts Model on 1920x1080 Video at 30fps},
  journal   = {CoRR},
  volume    = {abs/1607.08635},
  year      = {2016},
  url       = {http://arxiv.org/abs/1607.08635},
  eprinttype = {arXiv},
  eprint    = {1607.08635},
}

@inproceedings{10.5555/3045118.3045361,
  author    = {Chen, Wenlin and Wilson, James T. and Tyree, Stephen and Weinberger, Kilian Q. and Chen, Yixin},
  title     = {Compressing Neural Networks with the Hashing Trick},
  year      = {2015},
  publisher = {JMLR.org},
  abstract  = {As deep nets are increasingly used in applications suited for mobile devices, a fundamental dilemma becomes apparent: the trend in deep learning is to grow models to absorb ever-increasing data set sizes; however mobile devices are designed with very little memory and cannot store such large models. We present a novel network architecture, HashedNets, that exploits inherent redundancy in neural networks to achieve drastic reductions in model sizes. HashedNets uses a low-cost hash function to randomly group connection weights into hash buckets, and all connections within the same hash bucket share a single parameter value. These parameters are tuned to adjust to the HashedNets weight sharing architecture with standard backprop during training. Our hashing procedure introduces no additional memory overhead, and we demonstrate on several benchmark data sets that HashedNets shrink the storage requirements of neural networks substantially while mostly preserving generalization performance.},
  booktitle = {Proceedings of the 32nd International Conference on International Conference on Machine Learning - Volume 37},
  pages     = {2285--2294},
  numpages  = {10},
  location  = {Lille, France},
  series    = {ICML'15}
}

@inproceedings{10.1109/ISCA.2016.30,
  author    = {Han, Song and Liu, Xingyu and Mao, Huizi and Pu, Jing and Pedram, Ardavan and Horowitz, Mark A. and Dally, William J.},
  title     = {EIE: Efficient Inference Engine on Compressed Deep Neural Network},
  year      = {2016},
  isbn      = {9781467389471},
  publisher = {IEEE Press},
  url       = {https://doi.org/10.1109/ISCA.2016.30},
  doi       = {10.1109/ISCA.2016.30},
  abstract  = {State-of-the-art deep neural networks (DNNs) have hundreds of millions of connections and are both computationally and memory intensive, making them difficult to deploy on embedded systems with limited hardware resources and power budgets. While custom hardware helps the computation, fetching weights from DRAM is two orders of magnitude more expensive than ALU operations, and dominates the required power. Previously proposed 'Deep Compression' makes it possible to fit large DNNs (AlexNet and VGGNet) fully in on-chip SRAM. This compression is achieved by pruning the redundant connections and having multiple connections share the same weight. We propose an energy efficient inference engine (EIE) that performs inference on this compressed network model and accelerates the resulting sparse matrix-vector multiplication with weight sharing. Going from DRAM to SRAM gives EIE 120\texttimes{} energy saving; Exploiting sparsity saves 10\texttimes{}; Weight sharing gives 8\texttimes{}; Skipping zero activations from ReLU saves another 3\texttimes{}. Evaluated on nine DNN benchmarks, EIE is 189\texttimes{} and 13\texttimes{} faster when compared to CPU and GPU implementations of the same DNN without compression. EIE has a processing power of 102 GOPS working directly on a compressed network, corresponding to 3 TOPS on an uncompressed network, and processes FC layers of AlexNet at 1.88\texttimes{}$10^4$ frames/sec with a power dissipation of only 600mW. It is 24,000\texttimes{} and 3,400\texttimes{} more energy efficient than a CPU and GPU respectively. Compared with DaDianNao, EIE has 2.9\texttimes{}, 19\texttimes{} and 3\texttimes{} better throughput, energy efficiency and area efficiency.},
  booktitle = {Proceedings of the 43rd International Symposium on Computer Architecture},
  pages     = {243--254},
  numpages  = {12},
  keywords  = {hardware acceleration, ASIC, algorithm-hardware co-design, model compression, deep learning},
  location  = {Seoul, Republic of Korea},
  series    = {ISCA '16}
}

@article{Han2015DeepCC,
  title      = {Deep Compression: Compressing Deep Neural Networks with Pruning, Trained Quantization and Huffman Coding},
  author     = {Song Han and Huizi Mao and William J. Dally},
  journal    = {CoRR},
  volume     = {abs/1510.00149},
  year       = {2015},
  url        = {http://arxiv.org/abs/1510.00149},
  eprinttype = {arXiv},
  eprint     = {1510.00149}
}

@inproceedings{NIPS2015_ae0eb3ee,
  author    = {Han, Song and Pool, Jeff and Tran, John and Dally, William},
  booktitle = {Advances in Neural Information Processing Systems},
  editor    = {C. Cortes and N. Lawrence and D. Lee and M. Sugiyama and R. Garnett},
  publisher = {Curran Associates, Inc.},
  title     = {Learning both Weights and Connections for Efficient Neural Network},
  url       = {https://proceedings.neurips.cc/paper/2015/file/ae0eb3eed39d2bcef4622b2499a05fe6-Paper.pdf},
  volume    = {28},
  year      = {2015}
}

@inproceedings{NIPS1989_6c9882bb,
  author    = {LeCun, Yann and Denker, John and Solla, Sara},
  booktitle = {Advances in Neural Information Processing Systems},
  editor    = {D. Touretzky},
  publisher = {Morgan-Kaufmann},
  title     = {Optimal Brain Damage},
  url       = {https://proceedings.neurips.cc/paper/1989/file/6c9882bbac1c7093bd25041881277658-Paper.pdf},
  volume    = {2},
  year      = {1989}
}

@article{8704878,
  author    = {X. Dai and H. Yin and N. K. Jha},
  journal   = {IEEE Transactions on Computers},
  title     = {NeST: A Neural Network Synthesis Tool Based on a Grow-and-Prune Paradigm},
  year      = {2019},
  volume    = {68},
  number    = {10},
  issn      = {1557-9956},
  pages     = {1487--1497},
  keywords  = {neurons;computer architecture;training;biological neural networks;tools;correlation},
  doi       = {10.1109/TC.2019.2914438},
  publisher = {IEEE Computer Society},
  address   = {Los Alamitos, CA, USA},
  month     = oct
}

@inproceedings{NIPS1992_303ed4c6,
  author    = {Hassibi, Babak and Stork, David},
  booktitle = {Advances in Neural Information Processing Systems},
  editor    = {S. Hanson and J. Cowan and C. Giles},
  publisher = {Morgan-Kaufmann},
  title     = {Second order derivatives for network pruning: Optimal Brain Surgeon},
  url       = {https://proceedings.neurips.cc/paper/1992/file/303ed4c69846ab36c2904d3ba8573050-Paper.pdf},
  volume    = {5},
  year      = {1992}
}