darknet.js 56 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511661167116811691170117111721173117411751176
  1. var darknet = darknet || {};
  2. var text = text || require('./text');
  3. darknet.ModelFactory = class {
  4. match(context) {
  5. const identifier = context.identifier;
  6. const extension = identifier.split('.').pop().toLowerCase();
  7. switch (extension) {
  8. case 'weights':
  9. if (darknet.Weights.open(context.stream)) {
  10. return 'darknet.weights';
  11. }
  12. break;
  13. default:
  14. try {
  15. const reader = text.Reader.open(context.stream.peek(), 65536);
  16. for (;;) {
  17. const line = reader.read();
  18. if (line === undefined) {
  19. break;
  20. }
  21. const content = line.trim();
  22. if (content.length === 0 || content.startsWith('#')) {
  23. continue;
  24. }
  25. if (content.startsWith('[') && content.endsWith(']')) {
  26. return 'darknet.model';
  27. }
  28. return undefined;
  29. }
  30. }
  31. catch (err) {
  32. // continue regardless of error
  33. }
  34. break;
  35. }
  36. return undefined;
  37. }
  38. open(context, match) {
  39. return context.metadata('darknet-metadata.json').then((metadata) => {
  40. const openModel = (metadata, cfg, weights) => {
  41. return new darknet.Model(metadata, cfg, darknet.Weights.open(weights));
  42. };
  43. const identifier = context.identifier;
  44. const parts = identifier.split('.');
  45. parts.pop();
  46. const basename = parts.join('.');
  47. switch (match) {
  48. case 'darknet.weights':
  49. return context.request(basename + '.cfg', null).then((stream) => {
  50. const buffer = stream.read();
  51. return openModel(metadata, buffer, context.stream);
  52. });
  53. case 'darknet.model':
  54. return context.request(basename + '.weights', null).then((stream) => {
  55. return openModel(metadata, context.stream.peek(), stream);
  56. }).catch(() => {
  57. return openModel(metadata, context.stream.peek(), null);
  58. });
  59. default: {
  60. throw new darknet.Error("Unsupported Darknet format '" + match + "'.");
  61. }
  62. }
  63. });
  64. }
  65. };
  66. darknet.Model = class {
  67. constructor(metadata, cfg, weights) {
  68. this._graphs = [ new darknet.Graph(metadata, cfg, weights) ];
  69. }
  70. get format() {
  71. return 'Darknet';
  72. }
  73. get graphs() {
  74. return this._graphs;
  75. }
  76. };
  77. darknet.Graph = class {
  78. constructor(metadata, cfg, weights) {
  79. this._inputs = [];
  80. this._outputs = [];
  81. this._nodes = [];
  82. // read_cfg
  83. const sections = [];
  84. let section = null;
  85. const reader = text.Reader.open(cfg);
  86. let lineNumber = 0;
  87. for (;;) {
  88. lineNumber++;
  89. const content = reader.read();
  90. if (content === undefined) {
  91. break;
  92. }
  93. const line = content.replace(/\s/g, '');
  94. if (line.length > 0) {
  95. switch (line[0]) {
  96. case '#':
  97. case ';':
  98. break;
  99. case '[': {
  100. const type = line[line.length - 1] === ']' ? line.substring(1, line.length - 1) : line.substring(1);
  101. section = {
  102. line: lineNumber,
  103. type: type,
  104. options: {}
  105. };
  106. sections.push(section);
  107. break;
  108. }
  109. default: {
  110. if (!section || line[0] < 0x20 || line[0] > 0x7E) {
  111. throw new darknet.Error("Invalid cfg '" + content.replace(/[^\x20-\x7E]+/g, '?').trim() + "' at line " + lineNumber.toString() + ".");
  112. }
  113. const index = line.indexOf('=');
  114. if (index < 0) {
  115. throw new darknet.Error("Invalid cfg '" + content.replace(/[^\x20-\x7E]+/g, '?').trim() + "' at line " + lineNumber.toString() + ".");
  116. }
  117. const key = line.substring(0, index);
  118. const value = line.substring(index + 1);
  119. section.options[key] = value;
  120. break;
  121. }
  122. }
  123. }
  124. }
  125. const option_find_int = (options, key, defaultValue) => {
  126. let value = options[key];
  127. if (typeof value === 'string' && value.startsWith('$')) {
  128. const key = value.substring(1);
  129. value = globals.has(key) ? globals.get(key) : value;
  130. }
  131. if (value !== undefined) {
  132. const number = parseInt(value, 10);
  133. if (!Number.isInteger(number)) {
  134. throw new darknet.Error("Invalid int option '" + JSON.stringify(options[key]) + "'.");
  135. }
  136. return number;
  137. }
  138. return defaultValue;
  139. };
  140. const option_find_str = (options, key, defaultValue) => {
  141. const value = options[key];
  142. return value !== undefined ? value : defaultValue;
  143. };
  144. const make_shape = (dimensions, source) => {
  145. if (dimensions.some((dimension) => dimension === 0 || dimension === undefined || isNaN(dimension))) {
  146. throw new darknet.Error("Invalid tensor shape '" + JSON.stringify(dimensions) + "' in '" + source + "'.");
  147. }
  148. return new darknet.TensorShape(dimensions);
  149. };
  150. const load_weights = (name, shape, visible) => {
  151. const data = weights ? weights.read(4 * shape.reduce((a, b) => a * b, 1)) : null;
  152. const type = new darknet.TensorType('float32', make_shape(shape, 'load_weights'));
  153. const initializer = new darknet.Tensor(type, data);
  154. const argument = new darknet.Argument('', null, initializer);
  155. return new darknet.Parameter(name, visible === false ? false : true, [ argument ]);
  156. };
  157. const load_batch_normalize_weights = (layer, prefix, size) => {
  158. layer.weights.push(load_weights(prefix + 'scale', [ size ], prefix === ''));
  159. layer.weights.push(load_weights(prefix + 'mean', [ size ], prefix === ''));
  160. layer.weights.push(load_weights(prefix + 'variance', [ size ], prefix === ''));
  161. };
  162. const make_convolutional_layer = (layer, prefix, w, h, c, n, groups, size, stride_x, stride_y, padding, batch_normalize) => {
  163. layer.out_w = Math.floor((w + 2 * padding - size) / stride_x) + 1;
  164. layer.out_h = Math.floor((h + 2 * padding - size) / stride_y) + 1;
  165. layer.out_c = n;
  166. layer.out = layer.out_w * layer.out_h * layer.out_c;
  167. layer.weights.push(load_weights(prefix + 'biases', [ n ], prefix === ''));
  168. if (batch_normalize) {
  169. if (prefix) {
  170. load_batch_normalize_weights(layer, prefix, n);
  171. }
  172. else {
  173. const batchnorm_layer = { weights: [] };
  174. load_batch_normalize_weights(batchnorm_layer, prefix, n);
  175. layer.chain.push({ type: 'batchnorm', layer: batchnorm_layer });
  176. }
  177. }
  178. layer.weights.push(load_weights(prefix + 'weights', [ Math.floor(c / groups), n, size, size ], prefix === ''));
  179. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out_w, layer.out_h, layer.out_c ], 'make_convolutional_layer'));
  180. };
  181. const make_connected_layer = (layer, prefix, inputs, outputs, batch_normalize) => {
  182. layer.out_h = 1;
  183. layer.out_w = 1;
  184. layer.out_c = outputs;
  185. layer.out = outputs;
  186. layer.weights.push(load_weights(prefix + 'biases', [ outputs ], prefix === ''));
  187. if (batch_normalize) {
  188. if (prefix) {
  189. load_batch_normalize_weights(layer, prefix, outputs);
  190. }
  191. else {
  192. const batchnorm_layer = { weights: [] };
  193. load_batch_normalize_weights(batchnorm_layer, prefix, outputs);
  194. layer.chain.push({ type: 'batchnorm', layer: batchnorm_layer });
  195. }
  196. }
  197. layer.weights.push(load_weights(prefix + 'weights', [ inputs, outputs ], prefix === ''));
  198. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ outputs ], 'make_connected_layer'));
  199. };
  200. if (sections.length === 0) {
  201. throw new darknet.Error('Config file has no sections.');
  202. }
  203. const params = {};
  204. const globals = new Map();
  205. const net = sections.shift();
  206. switch (net.type) {
  207. case 'net':
  208. case 'network': {
  209. params.h = option_find_int(net.options, 'height', 0);
  210. params.w = option_find_int(net.options, 'width', 0);
  211. params.c = option_find_int(net.options, 'channels', 0);
  212. params.inputs = option_find_int(net.options, 'inputs', params.h * params.w * params.c);
  213. for (const key of Object.keys(net.options)) {
  214. globals.set(key, net.options[key]);
  215. }
  216. break;
  217. }
  218. default: {
  219. throw new darknet.Error("Unexpected '[" + net.type + "]' section. First section must be [net] or [network].");
  220. }
  221. }
  222. const inputType = params.w && params.h && params.c ?
  223. new darknet.TensorType('float32', make_shape([ params.w, params.h, params.c ], 'params-if')) :
  224. new darknet.TensorType('float32', make_shape([ params.inputs ], 'params-else'));
  225. const inputName = 'input';
  226. params.arguments = [ new darknet.Argument(inputName, inputType, null) ];
  227. this._inputs.push(new darknet.Parameter(inputName, true, params.arguments));
  228. for (let i = 0; i < sections.length; i++) {
  229. const section = sections[i];
  230. section.name = i.toString();
  231. section.layer = {
  232. inputs: [],
  233. weights: [],
  234. outputs: [ new darknet.Argument(section.name, null, null) ],
  235. chain: []
  236. };
  237. }
  238. let infer = true;
  239. for (let i = 0; i < sections.length; i++) {
  240. const section = sections[i];
  241. const options = section.options;
  242. const layer = section.layer;
  243. layer.inputs.push(...params.arguments);
  244. switch (section.type) {
  245. case 'shortcut': {
  246. let remove = true;
  247. const from = options.from ? options.from.split(',').map((item) => Number.parseInt(item.trim(), 10)) : [];
  248. for (const route of from) {
  249. const index = route < 0 ? i + route : route;
  250. const exists = index >= 0 && index < sections.length;
  251. remove = exists && remove;
  252. if (exists) {
  253. const source = sections[index].layer;
  254. layer.inputs.push(source.outputs[0]);
  255. }
  256. }
  257. if (remove) {
  258. delete options.from;
  259. }
  260. break;
  261. }
  262. case 'sam':
  263. case 'scale_channels': {
  264. const from = option_find_int(options, 'from', 0);
  265. const index = from < 0 ? i + from : from;
  266. if (index >= 0 && index < sections.length) {
  267. const source = sections[index].layer;
  268. layer.from = source;
  269. layer.inputs.push(source.outputs[0]);
  270. delete options.from;
  271. }
  272. break;
  273. }
  274. case 'route': {
  275. layer.inputs = [];
  276. layer.layers = [];
  277. let remove = true;
  278. const routes = options.layers ? options.layers.split(',').map((route) => Number.parseInt(route.trim(), 10)) : [];
  279. for (const route of routes) {
  280. const index = route < 0 ? i + route : route;
  281. const exists = index >= 0 && index < sections.length;
  282. remove = exists && remove;
  283. if (exists) {
  284. const source = sections[index].layer;
  285. layer.inputs.push(source.outputs[0]);
  286. layer.layers.push(source);
  287. }
  288. }
  289. if (remove) {
  290. delete options.layers;
  291. }
  292. break;
  293. }
  294. default:
  295. break;
  296. }
  297. if (infer) {
  298. switch (section.type) {
  299. case 'conv':
  300. case 'convolutional':
  301. case 'deconvolutional': {
  302. const shape = layer.inputs[0].type.shape.dimensions;
  303. if (shape[0] !== params.w || shape[1] !== params.h || shape[2] !== params.c) {
  304. throw new darknet.Error('Layer before convolutional layer must output image.');
  305. }
  306. const size = option_find_int(options, 'size', 1);
  307. const n = option_find_int(options, 'filters', 1);
  308. const pad = option_find_int(options, 'pad', 0);
  309. const padding = pad ? (size >> 1) : option_find_int(options, 'padding', 0);
  310. let stride_x = option_find_int(options, 'stride_x', -1);
  311. let stride_y = option_find_int(options, 'stride_y', -1);
  312. if (stride_x < 1 || stride_y < 1) {
  313. const stride = option_find_int(options, 'stride', 1);
  314. stride_x = stride_x < 1 ? stride : stride_x;
  315. stride_y = stride_y < 1 ? stride : stride_y;
  316. }
  317. const groups = option_find_int(options, 'groups', 1);
  318. const batch_normalize = option_find_int(options, 'batch_normalize', 0);
  319. const activation = option_find_str(options, 'activation', 'logistic');
  320. make_convolutional_layer(layer, '', params.w, params.h, params.c, n, groups, size, stride_x, stride_y, padding, batch_normalize);
  321. if (activation !== 'logistic' && activation !== 'none') {
  322. layer.chain.push({ type: activation });
  323. }
  324. break;
  325. }
  326. case 'connected': {
  327. const outputs = option_find_int(options, 'output', 1);
  328. const batch_normalize = option_find_int(options, 'batch_normalize', 0);
  329. const activation = option_find_str(options, 'activation', 'logistic');
  330. make_connected_layer(layer, '', params.inputs, outputs, batch_normalize);
  331. if (activation !== 'logistic' && activation !== 'none') {
  332. layer.chain.push({ type: activation });
  333. }
  334. break;
  335. }
  336. case 'local': {
  337. const shape = layer.inputs[0].type.shape.dimensions;
  338. if (shape[0] !== params.w || shape[1] !== params.h || shape[2] !== params.c) {
  339. throw new darknet.Error('Layer before avgpool layer must output image.');
  340. }
  341. const n = option_find_int(options, 'filters' , 1);
  342. const size = option_find_int(options, 'size', 1);
  343. const stride = option_find_int(options, 'stride', 1);
  344. const pad = option_find_int(options, 'pad', 0);
  345. const activation = option_find_str(options, 'activation', 'logistic');
  346. layer.out_h = Math.floor((params.h - (pad ? 1 : size)) / stride) + 1;
  347. layer.out_w = Math.floor((params.w - (pad ? 1 : size)) / stride) + 1;
  348. layer.out_c = n;
  349. layer.out = layer.out_w * layer.out_h * layer.out_c;
  350. layer.weights.push(load_weights('weights', [ params.c, n, size, size, layer.out_h * layer.out_w ]));
  351. layer.weights.push(load_weights('biases',[ layer.out_w * layer.out_h * layer.out_c ]));
  352. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out_w, layer.out_h, layer.out_c ], 'local'));
  353. if (activation !== 'logistic' && activation !== 'none') {
  354. layer.chain.push({ type: activation });
  355. }
  356. break;
  357. }
  358. case 'batchnorm': {
  359. layer.out_h = params.h;
  360. layer.out_w = params.w;
  361. layer.out_c = params.c;
  362. layer.out = layer.in;
  363. load_batch_normalize_weights(layer, '', layer.out_c);
  364. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out_w, layer.out_h, layer.out_c ], 'batchnorm'));
  365. break;
  366. }
  367. case 'activation': {
  368. layer.out_h = params.h;
  369. layer.out_w = params.w;
  370. layer.out_c = params.c;
  371. layer.out = layer.in;
  372. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out_w, layer.out_h, layer.out_c ], 'activation'));
  373. break;
  374. }
  375. case 'max':
  376. case 'maxpool': {
  377. const shape = layer.inputs[0].type.shape.dimensions;
  378. if (shape[0] !== params.w || shape[1] !== params.h || shape[2] !== params.c) {
  379. throw new darknet.Error('Layer before maxpool layer must output image.');
  380. }
  381. const antialiasing = option_find_int(options, 'antialiasing', 0);
  382. const stride = option_find_int(options, 'stride', 1);
  383. const blur_stride_x = option_find_int(options, 'stride_x', stride);
  384. const blur_stride_y = option_find_int(options, 'stride_y', stride);
  385. const stride_x = antialiasing ? 1 : blur_stride_x;
  386. const stride_y = antialiasing ? 1 : blur_stride_y;
  387. const size = option_find_int(options, 'size', stride);
  388. const padding = option_find_int(options, 'padding', size - 1);
  389. const out_channels = option_find_int(options, 'out_channels', 1);
  390. const maxpool_depth = option_find_int(options, 'maxpool_depth', 0);
  391. if (maxpool_depth) {
  392. layer.out_c = out_channels;
  393. layer.out_w = params.w;
  394. layer.out_h = params.h;
  395. }
  396. else {
  397. layer.out_w = Math.floor((params.w + padding - size) / stride_x) + 1;
  398. layer.out_h = Math.floor((params.h + padding - size) / stride_y) + 1;
  399. layer.out_c = params.c;
  400. }
  401. if (antialiasing) {
  402. const blur_size = antialiasing === 2 ? 2 : 3;
  403. const blur_pad = antialiasing === 2 ? 0 : Math.floor(blur_size / 3);
  404. layer.input_layer = { weights: [], outputs: layer.outputs, chain: [] };
  405. make_convolutional_layer(layer.input_layer, '', layer.out_h, layer.out_w, layer.out_c, layer.out_c, layer.out_c, blur_size, blur_stride_x, blur_stride_y, blur_pad, 0);
  406. layer.out_w = layer.input_layer.out_w;
  407. layer.out_h = layer.input_layer.out_h;
  408. layer.out_c = layer.input_layer.out_c;
  409. }
  410. else {
  411. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out_w, layer.out_h, layer.out_c ], 'maxpool'));
  412. }
  413. layer.out = layer.out_w * layer.out_h * layer.out_c;
  414. break;
  415. }
  416. case 'avgpool': {
  417. const shape = layer.inputs[0].type.shape.dimensions;
  418. if (shape[0] !== params.w || shape[1] !== params.h || shape[2] !== params.c) {
  419. throw new darknet.Error('Layer before avgpool layer must output image.');
  420. }
  421. layer.out_w = 1;
  422. layer.out_h = 1;
  423. layer.out_c = params.c;
  424. layer.out = layer.out_c;
  425. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out_w, layer.out_h, layer.out_c ], 'avgpool'));
  426. break;
  427. }
  428. case 'crnn': {
  429. const size = option_find_int(options, 'size', 3);
  430. const stride = option_find_int(options, 'stride', 1);
  431. const output_filters = option_find_int(options, 'output', 1);
  432. const hidden_filters = option_find_int(options, 'hidden', 1);
  433. const groups = option_find_int(options, 'groups', 1);
  434. const pad = option_find_int(options, 'pad', 0);
  435. const padding = pad ? (size >> 1) : option_find_int(options, 'padding', 0);
  436. const batch_normalize = option_find_int(options, 'batch_normalize', 0);
  437. layer.input_layer = { weights: [], outputs: [ new darknet.Argument('', null, null) ], chain: [] };
  438. make_convolutional_layer(layer.input_layer, 'input_', params.h, params.w, params.c, hidden_filters, groups, size, stride, stride, padding, batch_normalize);
  439. layer.self_layer = { weights: [], outputs: [ new darknet.Argument('', null, null) ], chain: [] };
  440. make_convolutional_layer(layer.self_layer, 'self_', params.h, params.w, hidden_filters, hidden_filters, groups, size, stride, stride, padding, batch_normalize);
  441. layer.output_layer = { weights: [], outputs: layer.outputs, chain: [] };
  442. make_convolutional_layer(layer.output_layer, 'output_', params.h, params.w, hidden_filters, output_filters, groups, size, stride, stride, padding, batch_normalize);
  443. layer.weights = layer.weights.concat(layer.input_layer.weights);
  444. layer.weights = layer.weights.concat(layer.self_layer.weights);
  445. layer.weights = layer.weights.concat(layer.output_layer.weights);
  446. layer.out_h = layer.output_layer.out_h;
  447. layer.out_w = layer.output_layer.out_w;
  448. layer.out_c = output_filters;
  449. layer.out = layer.output_layer.out;
  450. break;
  451. }
  452. case 'rnn': {
  453. const outputs = option_find_int(options, 'output', 1);
  454. const hidden = option_find_int(options, 'hidden', 1);
  455. const batch_normalize = option_find_int(options, 'batch_normalize', 0);
  456. const inputs = params.inputs;
  457. layer.input_layer = { weights: [], outputs: [ new darknet.Argument('', null, null) ], chain: [] };
  458. make_connected_layer(layer.input_layer, 'input_', inputs, hidden, batch_normalize);
  459. layer.self_layer = { weights: [], outputs: [ new darknet.Argument('', null, null) ], chain: [] };
  460. make_connected_layer(layer.self_layer, 'self_', hidden, hidden, batch_normalize);
  461. layer.output_layer = { weights: [], outputs: layer.outputs, chain: [] };
  462. make_connected_layer(layer.output_layer, 'output_', hidden, outputs, batch_normalize);
  463. layer.weights = layer.weights.concat(layer.input_layer.weights);
  464. layer.weights = layer.weights.concat(layer.self_layer.weights);
  465. layer.weights = layer.weights.concat(layer.output_layer.weights);
  466. layer.out_w = 1;
  467. layer.out_h = 1;
  468. layer.out_c = outputs;
  469. layer.out = outputs;
  470. break;
  471. }
  472. case 'gru': {
  473. const inputs = params.inputs;
  474. const outputs = option_find_int(options, 'output', 1);
  475. const batch_normalize = option_find_int(options, 'batch_normalize', 0);
  476. layer.input_z_layer = { weights: [], outputs: [ new darknet.Argument('', null, null) ], chain: [] };
  477. make_connected_layer(layer.input_z_layer, 'input_z', inputs, outputs, batch_normalize);
  478. layer.state_z_layer = { weights: [], outputs: [ new darknet.Argument('', null, null) ], chain: [] };
  479. make_connected_layer(layer.state_z_layer, 'state_z', outputs, outputs, batch_normalize);
  480. layer.input_r_layer = { weights: [], outputs: [ new darknet.Argument('', null, null) ], chain: [] };
  481. make_connected_layer(layer.input_r_layer, 'input_r', inputs, outputs, batch_normalize);
  482. layer.state_r_layer = { weights: [], outputs: [ new darknet.Argument('', null, null) ], chain: [] };
  483. make_connected_layer(layer.state_r_layer, 'state_r', outputs, outputs, batch_normalize);
  484. layer.input_h_layer = { weights: [], outputs: [ new darknet.Argument('', null, null) ], chain: [] };
  485. make_connected_layer(layer.input_h_layer, 'input_h', inputs, outputs, batch_normalize);
  486. layer.state_h_layer = { weights: [], outputs: [ new darknet.Argument('', null, null) ], chain: [] };
  487. make_connected_layer(layer.state_h_layer, 'state_h', outputs, outputs, batch_normalize);
  488. layer.weights = layer.weights.concat(layer.input_z_layer.weights);
  489. layer.weights = layer.weights.concat(layer.state_z_layer.weights);
  490. layer.weights = layer.weights.concat(layer.input_r_layer.weights);
  491. layer.weights = layer.weights.concat(layer.state_r_layer.weights);
  492. layer.weights = layer.weights.concat(layer.input_h_layer.weights);
  493. layer.weights = layer.weights.concat(layer.state_h_layer.weights);
  494. layer.out = outputs;
  495. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ outputs ], 'gru'));
  496. break;
  497. }
  498. case 'lstm': {
  499. const inputs = params.inputs;
  500. const outputs = option_find_int(options, 'output', 1);
  501. const batch_normalize = option_find_int(options, 'batch_normalize', 0);
  502. layer.uf = { weights: [], outputs: [ new darknet.Argument('', null, null) ], chain: [] };
  503. make_connected_layer(layer.uf, 'uf_', inputs, outputs, batch_normalize);
  504. layer.ui = { weights: [], outputs: [ new darknet.Argument('', null, null) ], chain: [] };
  505. make_connected_layer(layer.ui, 'ui_', inputs, outputs, batch_normalize);
  506. layer.ug = { weights: [], outputs: [ new darknet.Argument('', null, null) ], chain: [] };
  507. make_connected_layer(layer.ug, 'ug_', inputs, outputs, batch_normalize);
  508. layer.uo = { weights: [], outputs: [ new darknet.Argument('', null, null) ], chain: [] };
  509. make_connected_layer(layer.uo, 'uo_', inputs, outputs, batch_normalize);
  510. layer.wf = { weights: [], outputs: [ new darknet.Argument('', null, null) ], chain: [] };
  511. make_connected_layer(layer.wf, 'wf_', outputs, outputs, batch_normalize);
  512. layer.wi = { weights: [], outputs: [ new darknet.Argument('', null, null) ], chain: [] };
  513. make_connected_layer(layer.wi, 'wi_', outputs, outputs, batch_normalize);
  514. layer.wg = { weights: [], outputs: [ new darknet.Argument('', null, null) ], chain: [] };
  515. make_connected_layer(layer.wg, 'wg_', outputs, outputs, batch_normalize);
  516. layer.wo = { weights: [], outputs: [ new darknet.Argument('', null, null) ], chain: [] };
  517. make_connected_layer(layer.wo, 'wo_', outputs, outputs, batch_normalize);
  518. layer.weights = layer.weights.concat(layer.uf.weights);
  519. layer.weights = layer.weights.concat(layer.ui.weights);
  520. layer.weights = layer.weights.concat(layer.ug.weights);
  521. layer.weights = layer.weights.concat(layer.uo.weights);
  522. layer.weights = layer.weights.concat(layer.wf.weights);
  523. layer.weights = layer.weights.concat(layer.wi.weights);
  524. layer.weights = layer.weights.concat(layer.wg.weights);
  525. layer.weights = layer.weights.concat(layer.wo.weights);
  526. layer.out_w = 1;
  527. layer.out_h = 1;
  528. layer.out_c = outputs;
  529. layer.out = outputs;
  530. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ outputs ], 'lstm'));
  531. weights = null;
  532. break;
  533. }
  534. case 'conv_lstm': {
  535. const size = option_find_int(options, "size", 3);
  536. const stride = option_find_int(options, "stride", 1);
  537. const output_filters = option_find_int(options, "output", 1);
  538. const groups = option_find_int(options, "groups", 1);
  539. const pad = option_find_int(options, "pad", 0);
  540. const padding = pad ? (size >> 1) : option_find_int(options, 'padding', 0);
  541. const batch_normalize = option_find_int(options, 'batch_normalize', 0);
  542. const bottleneck = option_find_int(options, "bottleneck", 0);
  543. const peephole = option_find_int(options, "peephole", 0);
  544. layer.uf = { weights: [], outputs: [ new darknet.Argument('', null, null) ], chain: [] };
  545. make_convolutional_layer(layer.uf, 'uf_', params.h, params.w, params.c, output_filters, groups, size, stride, stride, padding, batch_normalize);
  546. layer.ui = { weights: [], outputs: [ new darknet.Argument('', null, null) ], chain: [] };
  547. make_convolutional_layer(layer.ui, 'ui_', params.h, params.w, params.c, output_filters, groups, size, stride, stride, padding, batch_normalize);
  548. layer.ug = { weights: [], outputs: [ new darknet.Argument('', null, null) ], chain: [] };
  549. make_convolutional_layer(layer.ug, 'ug_', params.h, params.w, params.c, output_filters, groups, size, stride, stride, padding, batch_normalize);
  550. layer.uo = { weights: [], outputs: [ new darknet.Argument('', null, null) ], chain: [] };
  551. make_convolutional_layer(layer.uo, 'uo_', params.h, params.w, params.c, output_filters, groups, size, stride, stride, padding, batch_normalize);
  552. layer.weights = layer.weights.concat(layer.uf.weights);
  553. layer.weights = layer.weights.concat(layer.ui.weights);
  554. layer.weights = layer.weights.concat(layer.ug.weights);
  555. layer.weights = layer.weights.concat(layer.uo.weights);
  556. if (bottleneck) {
  557. layer.wf = { weights: [], outputs: [ new darknet.Argument('', null, null) ], chain: [] };
  558. make_convolutional_layer(layer.wf, 'wf_', params.h, params.w, output_filters * 2, output_filters, groups, size, stride, stride, padding, batch_normalize);
  559. layer.weights = layer.weights.concat(layer.wf.weights);
  560. }
  561. else {
  562. layer.wf = { weights: [], outputs: [ new darknet.Argument('', null, null) ], chain: [] };
  563. make_convolutional_layer(layer.wf, 'wf_', params.h, params.w, output_filters, output_filters, groups, size, stride, stride, padding, batch_normalize);
  564. layer.wi = { weights: [], outputs: [ new darknet.Argument('', null, null) ], chain: [] };
  565. make_convolutional_layer(layer.wi, 'wi_', params.h, params.w, output_filters, output_filters, groups, size, stride, stride, padding, batch_normalize);
  566. layer.wg = { weights: [], outputs: [ new darknet.Argument('', null, null) ], chain: [] };
  567. make_convolutional_layer(layer.wg, 'wg_', params.h, params.w, output_filters, output_filters, groups, size, stride, stride, padding, batch_normalize);
  568. layer.wo = { weights: [], outputs: [ new darknet.Argument('', null, null) ], chain: [] };
  569. make_convolutional_layer(layer.wo, 'wo_', params.h, params.w, output_filters, output_filters, groups, size, stride, stride, padding, batch_normalize);
  570. layer.weights = layer.weights.concat(layer.wf.weights);
  571. layer.weights = layer.weights.concat(layer.wi.weights);
  572. layer.weights = layer.weights.concat(layer.wg.weights);
  573. layer.weights = layer.weights.concat(layer.wo.weights);
  574. }
  575. if (peephole) {
  576. layer.vf = { weights: [], outputs: [ new darknet.Argument('', null, null) ], chain: [] };
  577. make_convolutional_layer(layer.vf, 'vf_', params.h, params.w, output_filters, output_filters, groups, size, stride, stride, padding, batch_normalize);
  578. layer.vi = { weights: [], outputs: [ new darknet.Argument('', null, null) ], chain: [] };
  579. make_convolutional_layer(layer.vi, 'vi_', params.h, params.w, output_filters, output_filters, groups, size, stride, stride, padding, batch_normalize);
  580. layer.vo = { weights: [], outputs: [ new darknet.Argument('', null, null) ], chain: [] };
  581. make_convolutional_layer(layer.vo, 'vo_', params.h, params.w, output_filters, output_filters, groups, size, stride, stride, padding, batch_normalize);
  582. layer.weights = layer.weights.concat(layer.vf.weights);
  583. layer.weights = layer.weights.concat(layer.vi.weights);
  584. layer.weights = layer.weights.concat(layer.vo.weights);
  585. }
  586. layer.out_h = layer.uo.out_h;
  587. layer.out_w = layer.uo.out_w;
  588. layer.out_c = output_filters;
  589. layer.out = layer.out_h * layer.out_w * layer.out_c;
  590. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out_w, layer.out_h, layer.out_c ], 'conv_lstm'));
  591. break;
  592. }
  593. case 'softmax': {
  594. layer.out_w = params.w;
  595. layer.out_h = params.h;
  596. layer.out_c = params.c;
  597. layer.out = params.inputs;
  598. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out ], 'softmax'));
  599. break;
  600. }
  601. case 'dropout': {
  602. layer.out_w = params.w;
  603. layer.out_h = params.h;
  604. layer.out_c = params.c;
  605. layer.out = params.inputs;
  606. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out_w, layer.out_h, layer.out_c ], 'dropout'));
  607. break;
  608. }
  609. case 'upsample': {
  610. const stride = option_find_int(options, 'stride', 2);
  611. layer.out_w = params.w * stride;
  612. layer.out_h = params.h * stride;
  613. layer.out_c = params.c;
  614. layer.out = layer.out_w * layer.out_h * layer.out_c;
  615. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out_w, layer.out_h, layer.out_c ], 'upsample'));
  616. break;
  617. }
  618. case 'crop': {
  619. const shape = layer.inputs[0].type.shape.dimensions;
  620. if (shape[0] !== params.w || shape[1] !== params.h || shape[2] !== params.c) {
  621. throw new darknet.Error('Layer before crop layer must output image.');
  622. }
  623. const crop_height = option_find_int(options, 'crop_height', 1);
  624. const crop_width = option_find_int(options, 'crop_width', 1);
  625. layer.out_w = crop_width;
  626. layer.out_h = crop_height;
  627. layer.out_c = params.c;
  628. layer.out = layer.out_w * layer.out_h * layer.out_c;
  629. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out_w, layer.out_h, layer.out_c ], 'crop'));
  630. break;
  631. }
  632. case 'yolo': {
  633. const classes = option_find_int(options, 'classes', 20);
  634. const n = option_find_int(options, 'num', 1);
  635. layer.out_h = params.h;
  636. layer.out_w = params.w;
  637. layer.out_c = n * (classes + 4 + 1);
  638. layer.out = layer.out_h * layer.out_w * layer.out_c;
  639. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out_w, layer.out_h, layer.out_c ], 'yolo'));
  640. break;
  641. }
  642. case 'Gaussian_yolo': {
  643. const classes = option_find_int(options, 'classes', 20);
  644. const n = option_find_int(options, 'num', 1);
  645. layer.out_h = params.h;
  646. layer.out_w = params.w;
  647. layer.out_c = n * (classes + 8 + 1);
  648. layer.out = layer.out_h * layer.out_w * layer.out_c;
  649. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out_w, layer.out_h, layer.out_c ], 'Gaussian_yolo'));
  650. break;
  651. }
  652. case 'region': {
  653. const coords = option_find_int(options, 'coords', 4);
  654. const classes = option_find_int(options, 'classes', 20);
  655. const num = option_find_int(options, 'num', 1);
  656. layer.out = params.h * params.w * num * (classes + coords + 1);
  657. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ params.h, params.w, num, (classes + coords + 1) ], 'region'));
  658. break;
  659. }
  660. case 'cost': {
  661. layer.out = params.inputs;
  662. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out ], 'cost'));
  663. break;
  664. }
  665. case 'reorg': {
  666. const stride = option_find_int(options, 'stride', 1);
  667. const reverse = option_find_int(options, 'reverse', 0);
  668. const extra = option_find_int(options, 'extra', 0);
  669. if (reverse) {
  670. layer.out_w = params.w * stride;
  671. layer.out_h = params.h * stride;
  672. layer.out_c = Math.floor(params.c / (stride * stride));
  673. layer.out = layer.out_h * layer.out_w * layer.out_c;
  674. }
  675. else {
  676. layer.out_w = Math.floor(params.w / stride);
  677. layer.out_h = Math.floor(params.h / stride);
  678. layer.out_c = params.c * (stride * stride);
  679. layer.out = layer.out_h * layer.out_w * layer.out_c;
  680. }
  681. if (extra) {
  682. layer.out_w = 0;
  683. layer.out_h = 0;
  684. layer.out_c = 0;
  685. layer.out = (params.h * params.w * params.c) + extra;
  686. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out ], 'reorg'));
  687. }
  688. else {
  689. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out_w, layer.out_h, layer.out_c ], 'reorg'));
  690. }
  691. break;
  692. }
  693. case 'route': {
  694. const layers = [].concat(layer.layers);
  695. const groups = option_find_int(options, 'groups', 1);
  696. layer.out = 0;
  697. for (const next of layers) {
  698. layer.out += next.outputs / groups;
  699. }
  700. if (layers.length > 0) {
  701. const first = layers.shift();
  702. layer.out_w = first.out_w;
  703. layer.out_h = first.out_h;
  704. layer.out_c = first.out_c / groups;
  705. while (layers.length > 0) {
  706. const next = layers.shift();
  707. if (next.out_w === first.out_w && next.out_h === first.out_h) {
  708. layer.out_c += next.out_c;
  709. continue;
  710. }
  711. infer = false;
  712. break;
  713. }
  714. if (infer) {
  715. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out_w, layer.out_h, layer.out_c ], 'route'));
  716. }
  717. }
  718. else {
  719. infer = false;
  720. }
  721. if (!infer) {
  722. layer.out_h = 0;
  723. layer.out_w = 0;
  724. layer.out_c = 0;
  725. }
  726. break;
  727. }
  728. case 'sam':
  729. case 'scale_channels': {
  730. const activation = option_find_str(options, 'activation', 'linear');
  731. const from = layer.from;
  732. if (from) {
  733. layer.out_w = from.out_w;
  734. layer.out_h = from.out_h;
  735. layer.out_c = from.out_c;
  736. layer.out = layer.out_w * layer.out_h * layer.out_c;
  737. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out_w, layer.out_h, layer.out_c ], 'shortcut|scale_channels|sam'));
  738. }
  739. if (activation !== 'linear' && activation !== 'none') {
  740. layer.chain.push({ type: activation });
  741. }
  742. break;
  743. }
  744. case 'shortcut': {
  745. const activation = option_find_str(options, 'activation', 'linear');
  746. layer.out_w = params.w;
  747. layer.out_h = params.h;
  748. layer.out_c = params.c;
  749. layer.out = params.w * params.h * params.c;
  750. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ params.w, params.h, params.c ], 'shortcut|scale_channels|sam'));
  751. if (activation !== 'linear' && activation !== 'none') {
  752. layer.chain.push({ type: activation });
  753. }
  754. break;
  755. }
  756. case 'detection': {
  757. layer.out_w = params.w;
  758. layer.out_h = params.h;
  759. layer.out_c = params.c;
  760. layer.out = params.inputs;
  761. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out ], 'detection'));
  762. break;
  763. }
  764. default: {
  765. infer = false;
  766. break;
  767. }
  768. }
  769. params.h = layer.out_h;
  770. params.w = layer.out_w;
  771. params.c = layer.out_c;
  772. params.inputs = layer.out;
  773. params.last = section;
  774. }
  775. params.arguments = layer.outputs;
  776. }
  777. for (let i = 0; i < sections.length; i++) {
  778. this._nodes.push(new darknet.Node(metadata, net, sections[i]));
  779. }
  780. if (weights) {
  781. weights.validate();
  782. }
  783. }
    // Accessor for the input parameter list populated by the constructor.
    get inputs() {
        return this._inputs;
    }
    // Accessor for the output parameter list populated by the constructor.
    get outputs() {
        return this._outputs;
    }
    // Accessor for the darknet.Node list built from the .cfg sections.
    get nodes() {
        return this._nodes;
    }
  793. };
  794. darknet.Parameter = class {
  795. constructor(name, visible, args) {
  796. this._name = name;
  797. this._visible = visible;
  798. this._arguments = args;
  799. }
  800. get name() {
  801. return this._name;
  802. }
  803. get visible() {
  804. return this._visible;
  805. }
  806. get arguments() {
  807. return this._arguments;
  808. }
  809. };
  810. darknet.Argument = class {
  811. constructor(name, type, initializer) {
  812. if (typeof name !== 'string') {
  813. throw new darknet.Error("Invalid argument identifier '" + JSON.stringify(name) + "'.");
  814. }
  815. this._name = name;
  816. this._type = type;
  817. this._initializer = initializer;
  818. }
  819. get name() {
  820. return this._name;
  821. }
  822. get type() {
  823. if (this._initializer) {
  824. return this._initializer.type;
  825. }
  826. return this._type;
  827. }
  828. set type(value) {
  829. if (this._type) {
  830. throw new darknet.Error('Invalid argument type set operation.');
  831. }
  832. this._type = value;
  833. }
  834. get initializer() {
  835. return this._initializer;
  836. }
  837. };
  838. darknet.Node = class {
  839. constructor(metadata, net, section) {
  840. this._name = section.name || '';
  841. this._location = section.line !== undefined ? section.line.toString() : undefined;
  842. this._attributes = [];
  843. this._inputs = [];
  844. this._outputs = [];
  845. this._chain = [];
  846. const type = section.type;
  847. this._type = metadata.type(type) || { name: type };
  848. const layer = section.layer;
  849. if (layer && layer.inputs && layer.inputs.length > 0) {
  850. this._inputs.push(new darknet.Parameter(layer.inputs.length <= 1 ? 'input' : 'inputs', true, layer.inputs));
  851. }
  852. if (layer && layer.weights && layer.weights.length > 0) {
  853. this._inputs = this._inputs.concat(layer.weights);
  854. }
  855. if (layer && layer.outputs && layer.outputs.length > 0) {
  856. this._outputs.push(new darknet.Parameter(layer.outputs.length <= 1 ? 'output' : 'outputs', true, layer.outputs));
  857. }
  858. if (layer && layer.chain) {
  859. for (const chain of layer.chain) {
  860. this._chain.push(new darknet.Node(metadata, net, chain, ''));
  861. }
  862. }
  863. const options = section.options;
  864. if (options) {
  865. for (const key of Object.keys(options)) {
  866. this._attributes.push(new darknet.Attribute(metadata.attribute(type, key), key, options[key]));
  867. }
  868. }
  869. }
  870. get name() {
  871. return this._name;
  872. }
  873. get location() {
  874. return this._location;
  875. }
  876. get type() {
  877. return this._type;
  878. }
  879. get attributes() {
  880. return this._attributes;
  881. }
  882. get inputs() {
  883. return this._inputs;
  884. }
  885. get outputs() {
  886. return this._outputs;
  887. }
  888. get chain() {
  889. return this._chain;
  890. }
  891. };
darknet.Attribute = class {

    // Wraps a single [section] option as a named attribute. When a metadata
    // schema is available, the raw string value is converted to the schema's
    // declared type and visibility is derived from the schema.
    constructor(schema, name, value) {
        this._name = name;
        this._value = value;
        if (schema) {
            this._type = schema.type || '';
            switch (this._type) {
                case '':
                case 'string': {
                    // Untyped and string attributes keep the raw value.
                    break;
                }
                case 'int32': {
                    // Only replace the raw value when it parsed to an integer;
                    // otherwise the original string is preserved.
                    const number = parseInt(this._value, 10);
                    if (Number.isInteger(number)) {
                        this._value = number;
                    }
                    break;
                }
                case 'float32': {
                    const number = parseFloat(this._value);
                    if (!isNaN(number)) {
                        this._value = number;
                    }
                    break;
                }
                case 'int32[]': {
                    // Comma-separated integer list; keep the raw string if any
                    // element fails to parse as an integer.
                    const numbers = this._value.split(',').map((item) => parseInt(item.trim(), 10));
                    if (numbers.every((number) => Number.isInteger(number))) {
                        this._value = numbers;
                    }
                    break;
                }
                default: {
                    throw new darknet.Error("Unsupported attribute type '" + this._type + "'.");
                }
            }
            // Hide attributes explicitly marked invisible, or whose value still
            // equals the schema default. NOTE(review): '==' is loose —
            // presumably intentional so an unconverted string value can still
            // match a numeric default; confirm before tightening to '==='.
            if (Object.prototype.hasOwnProperty.call(schema, 'visible') && !schema.visible) {
                this._visible = false;
            }
            else if (Object.prototype.hasOwnProperty.call(schema, 'default')) {
                if (this._value == schema.default) {
                    this._visible = false;
                }
            }
        }
    }

    get name() {
        return this._name;
    }

    get type() {
        return this._type;
    }

    get value() {
        return this._value;
    }

    get visible() {
        // Visible unless the constructor explicitly set _visible to false.
        return this._visible == false ? false : true;
    }
};
  951. darknet.Tensor = class {
  952. constructor(type, data) {
  953. this._type = type;
  954. this._data = data;
  955. }
  956. get kind() {
  957. return 'Tensor';
  958. }
  959. get name() {
  960. return '';
  961. }
  962. get type() {
  963. return this._type;
  964. }
  965. get state() {
  966. return this._context().state;
  967. }
  968. get value() {
  969. const context = this._context();
  970. if (context.state) {
  971. return null;
  972. }
  973. context.limit = Number.MAX_SAFE_INTEGER;
  974. return this._decode(context, 0);
  975. }
  976. toString() {
  977. const context = this._context();
  978. if (context.state) {
  979. return '';
  980. }
  981. context.limit = 10000;
  982. const value = this._decode(context, 0);
  983. return JSON.stringify(value, null, 4);
  984. }
  985. _context() {
  986. const context = {};
  987. if (!this._data) {
  988. context.state = 'Tensor data is empty.';
  989. return context;
  990. }
  991. context.state = null;
  992. context.position = 0;
  993. context.count = 0;
  994. context.dataView = new DataView(this._data.buffer, this._data.byteOffset, this._data.byteLength);
  995. context.dimensions = this.type.shape.dimensions;
  996. return context;
  997. }
  998. _decode(context, dimension) {
  999. const results = [];
  1000. const size = context.dimensions[dimension];
  1001. if (dimension == context.dimensions.length - 1) {
  1002. for (let i = 0; i < size; i++) {
  1003. if (context.count > context.limit) {
  1004. results.push('...');
  1005. return results;
  1006. }
  1007. results.push(context.dataView.getFloat32(context.position, true));
  1008. context.position += 4;
  1009. context.count++;
  1010. }
  1011. }
  1012. else {
  1013. for (let j = 0; j < size; j++) {
  1014. if (context.count > context.limit) {
  1015. results.push('...');
  1016. return results;
  1017. }
  1018. results.push(this._decode(context, dimension + 1));
  1019. }
  1020. }
  1021. return results;
  1022. }
  1023. };
  1024. darknet.TensorType = class {
  1025. constructor(dataType, shape) {
  1026. this._dataType = dataType;
  1027. this._shape = shape;
  1028. }
  1029. get dataType() {
  1030. return this._dataType;
  1031. }
  1032. get shape() {
  1033. return this._shape;
  1034. }
  1035. toString() {
  1036. return (this._dataType || '?') + this._shape.toString();
  1037. }
  1038. };
  1039. darknet.TensorShape = class {
  1040. constructor(dimensions) {
  1041. if (dimensions.some((dimension) => dimension === 0 || dimension === undefined || isNaN(dimension))) {
  1042. throw new darknet.Error("Invalid tensor shape '" + JSON.stringify(dimensions) + "'.");
  1043. }
  1044. this._dimensions = dimensions;
  1045. }
  1046. get dimensions() {
  1047. return this._dimensions;
  1048. }
  1049. toString() {
  1050. if (this._dimensions) {
  1051. if (this._dimensions.length == 0) {
  1052. return '';
  1053. }
  1054. return '[' + this._dimensions.map((dimension) => dimension.toString()).join(',') + ']';
  1055. }
  1056. return '';
  1057. }
  1058. };
  1059. darknet.Weights = class {
  1060. static open(stream) {
  1061. if (stream && stream.length >= 20) {
  1062. const buffer = stream.peek(12);
  1063. const view = new DataView(buffer.buffer, buffer.byteOffset, buffer.byteLength);
  1064. const major = view.getInt32(0, true);
  1065. const minor = view.getInt32(4, true);
  1066. view.getInt32(8, true); // revision
  1067. const transpose = (major > 1000) || (minor > 1000);
  1068. if (!transpose) {
  1069. stream.skip(12 + (((major * 10 + minor) >= 2) ? 8 : 4));
  1070. return new darknet.Weights(stream);
  1071. }
  1072. }
  1073. return null;
  1074. }
  1075. constructor(stream) {
  1076. this._stream = stream;
  1077. }
  1078. read(size) {
  1079. return this._stream.read(size);
  1080. }
  1081. validate() {
  1082. if (this._stream.position != this._stream.length) {
  1083. throw new darknet.Error('Invalid weights size.');
  1084. }
  1085. }
  1086. };
// Domain error for Darknet model loading; the 'name' string is surfaced as
// the error category to the caller.
darknet.Error = class extends Error {

    constructor(message) {
        super(message);
        this.name = 'Error loading Darknet model.';
    }
};
// Export the factory only in CommonJS environments (guarded so the file also
// loads where 'module' is undefined).
if (typeof module !== 'undefined' && typeof module.exports === 'object') {
    module.exports.ModelFactory = darknet.ModelFactory;
}