From 1f96550385f0c66107e000538be1769b9b81c289 Mon Sep 17 00:00:00 2001
From: "Kwabena W. Agyeman"
Date: Wed, 30 Oct 2019 00:22:12 -0700
Subject: [PATCH 1/4] Reuse Code

Just trying to optimize things since flash is no longer unlimited.
---
 .../tf_mobilenet_search_whole_window.py       |  0
 .../tf_mobilenet_serach_just_center.py        |  0
 src/omv/py/py_tf.c                            | 46 +++++++++----------
 3 files changed, 22 insertions(+), 24 deletions(-)
 create mode 100644 scripts/examples/25-Machine-Learning/tf_mobilenet_search_whole_window.py
 create mode 100644 scripts/examples/25-Machine-Learning/tf_mobilenet_serach_just_center.py

diff --git a/scripts/examples/25-Machine-Learning/tf_mobilenet_search_whole_window.py b/scripts/examples/25-Machine-Learning/tf_mobilenet_search_whole_window.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/scripts/examples/25-Machine-Learning/tf_mobilenet_serach_just_center.py b/scripts/examples/25-Machine-Learning/tf_mobilenet_serach_just_center.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/src/omv/py/py_tf.c b/src/omv/py/py_tf.c
index 56947dce1..548acbbe2 100644
--- a/src/omv/py/py_tf.c
+++ b/src/omv/py/py_tf.c
@@ -137,6 +137,7 @@ STATIC mp_obj_t int_py_tf_load(mp_obj_t path_obj, bool mode)
     }

     fb_alloc_mark();
+
     uint32_t tensor_arena_size;
     uint8_t *tensor_arena = fb_alloc_all(&tensor_arena_size, FB_ALLOC_PREFER_SIZE);

@@ -153,6 +154,21 @@ STATIC mp_obj_t int_py_tf_load(mp_obj_t path_obj, bool mode)
     return tf_model;
 }

+STATIC mp_obj_t py_tf_load_xalloc(mp_obj_t path_obj)
+{
+    return int_py_tf_load(path_obj, false);
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_tf_load_obj, py_tf_load_xalloc);
+
+STATIC py_tf_model_obj_t *py_tf_load_fb_alloc(mp_obj_t path_obj)
+{
+    if (MP_OBJ_IS_TYPE(path_obj, &py_tf_model_type)) {
+        return (py_tf_model_obj_t *) path_obj;
+    } else {
+        return (py_tf_model_obj_t *) int_py_tf_load(path_obj, true);
+    }
+}
+
 typedef struct py_tf_input_data_callback_data {
     image_t *img;
     rectangle_t *roi;
@@ -286,7 +302,9 @@ STATIC void py_tf_classify_output_data_callback(void *callback_data,

 STATIC mp_obj_t py_tf_classify(uint n_args, const mp_obj_t *args, mp_map_t *kw_args)
 {
-    py_tf_model_obj_t *arg_model;
+    fb_alloc_mark();
+
+    py_tf_model_obj_t *arg_model = py_tf_load_fb_alloc(args[0]);
     image_t *arg_img = py_helper_arg_to_image_mutable(args[1]);

     rectangle_t roi;
@@ -304,14 +322,6 @@ STATIC mp_obj_t py_tf_classify(uint n_args, const mp_obj_t *args, mp_map_t *kw_a
     float arg_y_overlap = py_helper_keyword_float(n_args, args, 6, kw_args, MP_OBJ_NEW_QSTR(MP_QSTR_y_overlap), 0.0f);
     PY_ASSERT_TRUE_MSG(((0.0f <= arg_y_overlap) && (arg_y_overlap < 1.0f)) || (arg_y_overlap == -1.0f), "0 <= y_overlap < 1");

-    fb_alloc_mark();
-
-    if (MP_OBJ_IS_TYPE(args[0], &py_tf_model_type)) {
-        arg_model = (py_tf_model_obj_t *) args[0];
-    } else {
-        arg_model = int_py_tf_load(args[0], true);
-    }
-
     uint32_t tensor_arena_size;
     uint8_t *tensor_arena = fb_alloc_all(&tensor_arena_size, FB_ALLOC_PREFER_SIZE);

@@ -404,20 +414,14 @@ STATIC void py_tf_segment_output_data_callback(void *callback_data,

 STATIC mp_obj_t py_tf_segment(uint n_args, const mp_obj_t *args, mp_map_t *kw_args)
 {
-    py_tf_model_obj_t *arg_model;
+    fb_alloc_mark();
+
+    py_tf_model_obj_t *arg_model = py_tf_load_fb_alloc(args[0]);
     image_t *arg_img = py_helper_arg_to_image_mutable(args[1]);

     rectangle_t roi;
     py_helper_keyword_rectangle_roi(arg_img, n_args, args, 2, kw_args, &roi);

-    fb_alloc_mark();
-
-    if (MP_OBJ_IS_TYPE(args[0], &py_tf_model_type)) {
-        arg_model = (py_tf_model_obj_t *) args[0];
-    } else {
-        arg_model = int_py_tf_load(args[0], true);
-    }
-
     uint32_t tensor_arena_size;
     uint8_t *tensor_arena = fb_alloc_all(&tensor_arena_size, FB_ALLOC_PREFER_SIZE);

@@ -470,12 +474,6 @@ STATIC const mp_obj_type_t py_tf_model_type = {
     .locals_dict = (mp_obj_t) &locals_dict
 };

-STATIC mp_obj_t py_tf_load(mp_obj_t path_obj)
-{
-    return int_py_tf_load(path_obj, false);
-}
-STATIC MP_DEFINE_CONST_FUN_OBJ_1(py_tf_load_obj, py_tf_load);
-
 #endif // IMLIB_ENABLE_TF

 STATIC const mp_rom_map_elem_t globals_dict_table[] = {
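
The refactor above routes both py_tf_classify() and py_tf_segment() through
py_tf_load_fb_alloc(), so at the MicroPython level the first argument can be
either a path string or an already-loaded model object. A minimal usage
sketch of the two styles, assuming globals_dict_table (not shown in full
here) binds py_tf_load_obj as tf.load() and the classify handler as
tf.classify(), and using the example model filename from the scripts in
patch 3:

    import sensor, tf

    sensor.reset()
    sensor.set_pixformat(sensor.RGB565)
    sensor.set_framesize(sensor.QVGA)
    img = sensor.snapshot()

    # Style 1: pass a path. int_py_tf_load() runs with mode=true, so the
    # model lives on the frame buffer stack only for the duration of the call.
    for obj in tf.classify("mobilenet_v1_0.5_128_quant.tflite", img):
        print(obj.output())

    # Style 2: load once with mode=false (heap allocation) and reuse the
    # model object, trading RAM for not reloading the file on every call.
    net = tf.load("mobilenet_v1_0.5_128_quant.tflite")
    for obj in tf.classify(net, img):
        print(obj.output())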

From 905d20a03862ce0b89bc6a74a568f0773de5e82c Mon Sep 17 00:00:00 2001
From: "Kwabena W. Agyeman"
Date: Wed, 30 Oct 2019 00:22:57 -0700
Subject: [PATCH 2/4] Updated the TensorFlow library to accept more model types

Models that produce an [n][c] output tensor now work too.
---
 src/libtf/cortex-m4/libtf.a | Bin 455546 -> 455610 bytes
 src/libtf/cortex-m7/libtf.a | Bin 453862 -> 453918 bytes
 2 files changed, 0 insertions(+), 0 deletions(-)

diff --git a/src/libtf/cortex-m4/libtf.a b/src/libtf/cortex-m4/libtf.a
index bd6d760b1a350b78db0af6c6a24bff0a2936637c..aac3ad254ac8bbe43512631a276d101eec62e3a1 100644
GIT binary patch
delta 3313
[base85 binary delta data omitted]
delta 3010
[base85 binary delta data omitted]

diff --git a/src/libtf/cortex-m7/libtf.a b/src/libtf/cortex-m7/libtf.a
index 1ceda66b4351b3fc1a461e2103fa49de5a8b9208..9b028e249a22afaf323d5e22a8b4ba8bd3ad4b00 100644
GIT binary patch
delta 3247
[base85 binary delta data omitted]
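
Since the library update only changes what output tensor shapes are
accepted, a model returning [n][c] should behave the same from MicroPython
as one returning [1][1][1][c]: the scores still come back as the flat list
that obj.output() returns in the example scripts below. A hedged sanity
check, assuming a labels file with one class name per output channel (file
names as in the scripts below):

    import sensor, tf

    sensor.reset()
    sensor.set_pixformat(sensor.RGB565)
    sensor.set_framesize(sensor.QVGA)

    labels = [line.rstrip('\n') for line in open("mobilenet_labels.txt")]

    for obj in tf.classify("mobilenet_v1_0.5_128_quant.tflite", sensor.snapshot()):
        scores = obj.output()
        # Regardless of how the model shapes its output tensor, the score
        # vector length should match the number of labels.
        assert len(scores) == len(labels)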
From bcebe2cde225236b0f88e77a3219e3cf4a9cdb73 Mon Sep 17 00:00:00 2001
From: "Kwabena W. Agyeman"
Date: Wed, 30 Oct 2019 00:24:43 -0700
Subject: [PATCH 3/4] Add mobilenet script examples

Yes, mobilenet runs on the OpenMV Cam now. It's mildly interesting;
however, it's really not meant to be used by itself.
---
 .../tf_mobilenet_search_whole_window.py       | 56 +++++++++++++++++
 .../tf_mobilenet_serach_just_center.py        | 62 +++++++++++++++++++
 2 files changed, 118 insertions(+)

diff --git a/scripts/examples/25-Machine-Learning/tf_mobilenet_search_whole_window.py b/scripts/examples/25-Machine-Learning/tf_mobilenet_search_whole_window.py
index e69de29bb..8a2c559d6 100644
--- a/scripts/examples/25-Machine-Learning/tf_mobilenet_search_whole_window.py
+++ b/scripts/examples/25-Machine-Learning/tf_mobilenet_search_whole_window.py
@@ -0,0 +1,56 @@
+# TensorFlow Lite Mobilenet V1 Example
+#
+# Google's Mobilenet V1 detects 1000 classes of objects.
+#
+# WARNING: Mobilenet is trained on ImageNet and isn't meant to classify anything
+# in the real world. It's just designed to score well on the ImageNet dataset.
+# This example just shows off running mobilenet on the OpenMV Cam. However, the
+# default model is not really usable for anything. You have to use transfer
+# learning to apply the model to a target problem by re-training the model.
+#
+# NOTE: This example only works on the OpenMV Cam H7 Pro (which has SDRAM) and better!
+#
+# In this example we slide the detector window over the image and get a list
+# of activations. Note that using a CNN with a sliding window is extremely
+# computationally expensive, so do not expect an exhaustive search to run in real time.
+
+import sensor, image, time, os, tf
+
+sensor.reset()                         # Reset and initialize the sensor.
+sensor.set_pixformat(sensor.RGB565)    # Set pixel format to RGB565 (or GRAYSCALE)
+sensor.set_framesize(sensor.QVGA)      # Set frame size to QVGA (320x240)
+sensor.set_windowing((240, 240))       # Set 240x240 window.
+sensor.skip_frames(time=2000)          # Let the camera adjust.
+
+mobilenet_version = "1" # 1
+mobilenet_width = "0.5" # 1.0, 0.75, 0.5, 0.25
+mobilenet_resolution = "128" # 224, 192, 160, 128
+
+mobilenet = "mobilenet_v%s_%s_%s_quant.tflite" % (mobilenet_version, mobilenet_width, mobilenet_resolution)
+labels = [line.rstrip('\n') for line in open("mobilenet_labels.txt")]
+
+clock = time.clock()
+while(True):
+    clock.tick()
+
+    img = sensor.snapshot()
+
+    # net.classify() will run the network on an ROI in the image (or on the whole image if the ROI is not
+    # specified). A classification score output vector will be generated for each location. At each scale the
+    # detection window is moved around in the ROI using x_overlap (0-1) and y_overlap (0-1) as a guide.
+    # If you set the overlap to 0.5 then each detection window will overlap the previous one by 50%. Note
+    # that the computational workload goes WAY up the more the windows overlap. Finally, for multi-scale
+    # matching, after sliding the network around in the x/y dimensions the detection window will shrink by
+    # scale_mul (0-1) down to min_scale (0-1). For example, if scale_mul is 0.5 the detection window will shrink by 50%.
+    # Note that at a lower scale there's even more area to search if x_overlap and y_overlap are small...
+
+    # default settings just do one detection... change them to search the image...
+    for obj in tf.classify(mobilenet, img, min_scale=1.0, scale_mul=0.5, x_overlap=0.0, y_overlap=0.0):
+        print("**********\nTop 5 Detections at [x=%d,y=%d,w=%d,h=%d]" % obj.rect())
+        img.draw_rectangle(obj.rect())
+        # This combines the labels and confidence values into a list of tuples
+        # and then sorts that list by the confidence values.
+        sorted_list = sorted(zip(labels, obj.output()), key = lambda x: x[1], reverse = True)
+        for i in range(5):
+            print("%s = %f" % (sorted_list[i][0], sorted_list[i][1]))
+    print(clock.fps(), "fps")
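
To get a feel for how fast the search space described in the comment block
above grows, here is some illustrative window-count arithmetic. This is not
the firmware's actual window-placement code (which lives in the C library);
it just assumes a square window of roi * scale stepped by
window * (1 - overlap):

    # Rough estimate of how many times the network runs for one classify()
    # call, under the stride/scale assumptions stated above.
    def estimate_windows(roi_w=240, roi_h=240, x_overlap=0.5, y_overlap=0.5,
                         scale_mul=0.5, min_scale=0.25):
        total = 0
        scale = 1.0
        while scale >= min_scale:
            win_w = int(roi_w * scale)
            win_h = int(roi_h * scale)
            x_stride = max(1, int(win_w * (1.0 - x_overlap)))
            y_stride = max(1, int(win_h * (1.0 - y_overlap)))
            total += (((roi_w - win_w) // x_stride) + 1) * \
                     (((roi_h - win_h) // y_stride) + 1)
            scale *= scale_mul
        return total

    print(estimate_windows())                              # 59 invocations
    print(estimate_windows(x_overlap=0.0, y_overlap=0.0))  # 21 with no overlap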
diff --git a/scripts/examples/25-Machine-Learning/tf_mobilenet_serach_just_center.py b/scripts/examples/25-Machine-Learning/tf_mobilenet_serach_just_center.py
index e69de29bb..1c243533c 100644
--- a/scripts/examples/25-Machine-Learning/tf_mobilenet_serach_just_center.py
+++ b/scripts/examples/25-Machine-Learning/tf_mobilenet_serach_just_center.py
@@ -0,0 +1,62 @@
+# TensorFlow Lite Mobilenet V1 Example
+#
+# Google's Mobilenet V1 detects 1000 classes of objects.
+#
+# WARNING: Mobilenet is trained on ImageNet and isn't meant to classify anything
+# in the real world. It's just designed to score well on the ImageNet dataset.
+# This example just shows off running mobilenet on the OpenMV Cam. However, the
+# default model is not really usable for anything. You have to use transfer
+# learning to apply the model to a target problem by re-training the model.
+#
+# NOTE: This example only works on the OpenMV Cam H7 Pro (which has SDRAM) and better!
+#
+# In this example we slide the detector window over the image and get a list
+# of activations. Note that using a CNN with a sliding window is extremely
+# computationally expensive, so do not expect an exhaustive search to run in real time.
+
+import sensor, image, time, os, tf
+
+sensor.reset()                         # Reset and initialize the sensor.
+sensor.set_pixformat(sensor.RGB565)    # Set pixel format to RGB565 (or GRAYSCALE)
+sensor.set_framesize(sensor.QVGA)      # Set frame size to QVGA (320x240)
+sensor.set_windowing((240, 240))       # Set 240x240 window.
+sensor.skip_frames(time=2000)          # Let the camera adjust.
+
+mobilenet_version = "1" # 1
+mobilenet_width = "0.5" # 1.0, 0.75, 0.5, 0.25
+mobilenet_resolution = "128" # 224, 192, 160, 128
+
+mobilenet = "mobilenet_v%s_%s_%s_quant.tflite" % (mobilenet_version, mobilenet_width, mobilenet_resolution)
+labels = [line.rstrip('\n') for line in open("mobilenet_labels.txt")]
+
+clock = time.clock()
+while(True):
+    clock.tick()
+
+    img = sensor.snapshot()
+
+    # net.classify() will run the network on an ROI in the image (or on the whole image if the ROI is not
+    # specified). A classification score output vector will be generated for each location. At each scale the
+    # detection window is moved around in the ROI using x_overlap (0-1) and y_overlap (0-1) as a guide.
+    # If you set the overlap to 0.5 then each detection window will overlap the previous one by 50%. Note
+    # that the computational workload goes WAY up the more the windows overlap. Finally, for multi-scale
+    # matching, after sliding the network around in the x/y dimensions the detection window will shrink by
+    # scale_mul (0-1) down to min_scale (0-1). For example, if scale_mul is 0.5 the detection window will shrink by 50%.
+    # Note that at a lower scale there's even more area to search if x_overlap and y_overlap are small...
+
+    # Setting x_overlap=-1 forces the window to stay centered in the ROI in the x direction always. If
+    # y_overlap is not -1 the method will search in all vertical positions.
+
+    # Setting y_overlap=-1 forces the window to stay centered in the ROI in the y direction always. If
+    # x_overlap is not -1 the method will search in all horizontal positions.
+
+    # default settings just do one detection... change them to search the image...
+    for obj in tf.classify(mobilenet, img, min_scale=1.0, scale_mul=0.5, x_overlap=-1, y_overlap=-1):
+        print("**********\nTop 5 Detections at [x=%d,y=%d,w=%d,h=%d]" % obj.rect())
+        img.draw_rectangle(obj.rect())
+        # This combines the labels and confidence values into a list of tuples
+        # and then sorts that list by the confidence values.
+        sorted_list = sorted(zip(labels, obj.output()), key = lambda x: x[1], reverse = True)
+        for i in range(5):
+            print("%s = %f" % (sorted_list[i][0], sorted_list[i][1]))
+    print(clock.fps(), "fps")

From 99fbb531000112fb463fd75f8b116f63eb14a277 Mon Sep 17 00:00:00 2001
From: "Kwabena W. Agyeman"
Date: Wed, 30 Oct 2019 00:30:18 -0700
Subject: [PATCH 4/4] Add a note about where the models are

---
 .../25-Machine-Learning/tf_mobilenet_search_whole_window.py | 2 ++
 .../25-Machine-Learning/tf_mobilenet_serach_just_center.py  | 2 ++
 2 files changed, 4 insertions(+)

diff --git a/scripts/examples/25-Machine-Learning/tf_mobilenet_search_whole_window.py b/scripts/examples/25-Machine-Learning/tf_mobilenet_search_whole_window.py
index 8a2c559d6..92ce31381 100644
--- a/scripts/examples/25-Machine-Learning/tf_mobilenet_search_whole_window.py
+++ b/scripts/examples/25-Machine-Learning/tf_mobilenet_search_whole_window.py
@@ -9,6 +9,8 @@
 # learning to apply the model to a target problem by re-training the model.
 #
 # NOTE: This example only works on the OpenMV Cam H7 Pro (which has SDRAM) and better!
+# To get the models please see the CNN Network library in OpenMV IDE under
+# Tools -> Machine Vision. The labels are there too.
 #
 # In this example we slide the detector window over the image and get a list
 # of activations. Note that using a CNN with a sliding window is extremely
diff --git a/scripts/examples/25-Machine-Learning/tf_mobilenet_serach_just_center.py b/scripts/examples/25-Machine-Learning/tf_mobilenet_serach_just_center.py
index 1c243533c..1371a877f 100644
--- a/scripts/examples/25-Machine-Learning/tf_mobilenet_serach_just_center.py
+++ b/scripts/examples/25-Machine-Learning/tf_mobilenet_serach_just_center.py
@@ -9,6 +9,8 @@
 # learning to apply the model to a target problem by re-training the model.
 #
 # NOTE: This example only works on the OpenMV Cam H7 Pro (which has SDRAM) and better!
+# To get the models please see the CNN Network library in OpenMV IDE under
+# Tools -> Machine Vision. The labels are there too.
 #
 # In this example we slide the detector window over the image and get a list
 # of activations. Note that using a CNN with a sliding window is extremely
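
With x_overlap=-1 and y_overlap=-1 pinning the window to the ROI center and
min_scale=1.0 disabling scaling, the second script runs the network exactly
once per frame, so extracting just the top label is cheap. A small variant
sketch (tf.load() assumed to be the binding for py_tf_load_obj, as in the
earlier note; same model and label files as the scripts above):

    import sensor, time, tf

    sensor.reset()
    sensor.set_pixformat(sensor.RGB565)
    sensor.set_framesize(sensor.QVGA)
    sensor.set_windowing((240, 240))
    sensor.skip_frames(time=2000)

    labels = [line.rstrip('\n') for line in open("mobilenet_labels.txt")]
    net = tf.load("mobilenet_v1_0.5_128_quant.tflite")  # load once, reuse

    clock = time.clock()
    while(True):
        clock.tick()
        img = sensor.snapshot()
        # One centered detection window -> a single classification result.
        for obj in tf.classify(net, img, min_scale=1.0, x_overlap=-1, y_overlap=-1):
            label, score = max(zip(labels, obj.output()), key=lambda x: x[1])
            print("%s = %f (%f fps)" % (label, score, clock.fps()))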