From 6e7bcd2d26d3700b68d63a764179934374ee1d5a Mon Sep 17 00:00:00 2001 From: Mhrooz Date: Mon, 5 Aug 2024 23:36:58 +0200 Subject: [PATCH] add some comments --- cotracker/__init__.py | 10 +- cotracker/__pycache__/__init__.cpython-38.pyc | Bin 0 -> 125 bytes cotracker/__pycache__/__init__.cpython-39.pyc | Bin 0 -> 129 bytes .../__pycache__/predictor.cpython-38.pyc | Bin 0 -> 6064 bytes .../__pycache__/predictor.cpython-39.pyc | Bin 0 -> 6036 bytes cotracker/datasets/__init__.py | 10 +- cotracker/datasets/dataclass_utils.py | 332 +++--- cotracker/datasets/dr_dataset.py | 322 +++--- cotracker/datasets/kubric_movif_dataset.py | 882 +++++++------- cotracker/datasets/tap_vid_datasets.py | 418 +++---- cotracker/datasets/utils.py | 212 ++-- cotracker/evaluation/__init__.py | 10 +- .../configs/eval_dynamic_replica.yaml | 10 +- .../configs/eval_tapvid_davis_first.yaml | 10 +- .../configs/eval_tapvid_davis_strided.yaml | 10 +- .../configs/eval_tapvid_kinetics_first.yaml | 10 +- cotracker/evaluation/core/__init__.py | 10 +- cotracker/evaluation/core/eval_utils.py | 276 ++--- cotracker/evaluation/core/evaluator.py | 506 ++++---- cotracker/evaluation/evaluate.py | 338 +++--- cotracker/models/__init__.py | 10 +- .../__pycache__/__init__.cpython-38.pyc | Bin 0 -> 132 bytes .../__pycache__/__init__.cpython-39.pyc | Bin 0 -> 136 bytes .../build_cotracker.cpython-38.pyc | Bin 0 -> 867 bytes .../build_cotracker.cpython-39.pyc | Bin 0 -> 891 bytes cotracker/models/build_cotracker.py | 66 +- cotracker/models/core/__init__.py | 10 +- .../core/__pycache__/__init__.cpython-38.pyc | Bin 0 -> 137 bytes .../core/__pycache__/__init__.cpython-39.pyc | Bin 0 -> 141 bytes .../__pycache__/embeddings.cpython-38.pyc | Bin 0 -> 3578 bytes .../__pycache__/embeddings.cpython-39.pyc | Bin 0 -> 3546 bytes .../__pycache__/model_utils.cpython-38.pyc | Bin 0 -> 8269 bytes .../__pycache__/model_utils.cpython-39.pyc | Bin 0 -> 8255 bytes cotracker/models/core/cotracker/__init__.py | 10 +- .../__pycache__/__init__.cpython-38.pyc | Bin 0 -> 147 bytes .../__pycache__/__init__.cpython-39.pyc | Bin 0 -> 151 bytes .../__pycache__/blocks.cpython-38.pyc | Bin 0 -> 9962 bytes .../__pycache__/blocks.cpython-39.pyc | Bin 0 -> 9937 bytes .../__pycache__/cotracker.cpython-38.pyc | Bin 0 -> 11890 bytes .../__pycache__/cotracker.cpython-39.pyc | Bin 0 -> 11807 bytes cotracker/models/core/cotracker/blocks.py | 735 ++++++------ cotracker/models/core/cotracker/cotracker.py | 1022 +++++++++-------- cotracker/models/core/cotracker/losses.py | 122 +- cotracker/models/core/embeddings.py | 240 ++-- cotracker/models/core/model_utils.py | 512 ++++----- cotracker/models/evaluation_predictor.py | 208 ++-- cotracker/predictor.py | 554 ++++----- cotracker/utils/__init__.py | 10 +- .../utils/__pycache__/__init__.cpython-38.pyc | Bin 0 -> 131 bytes .../utils/__pycache__/__init__.cpython-39.pyc | Bin 0 -> 135 bytes .../__pycache__/visualizer.cpython-38.pyc | Bin 0 -> 7729 bytes .../__pycache__/visualizer.cpython-39.pyc | Bin 0 -> 7706 bytes cotracker/utils/visualizer.py | 686 +++++------ cotracker/version.py | 16 +- notebooks/demo.ipynb | 474 +++----- 55 files changed, 3946 insertions(+), 4095 deletions(-) create mode 100644 cotracker/__pycache__/__init__.cpython-38.pyc create mode 100644 cotracker/__pycache__/__init__.cpython-39.pyc create mode 100644 cotracker/__pycache__/predictor.cpython-38.pyc create mode 100644 cotracker/__pycache__/predictor.cpython-39.pyc create mode 100644 cotracker/models/__pycache__/__init__.cpython-38.pyc create mode 100644 
cotracker/models/__pycache__/__init__.cpython-39.pyc create mode 100644 cotracker/models/__pycache__/build_cotracker.cpython-38.pyc create mode 100644 cotracker/models/__pycache__/build_cotracker.cpython-39.pyc create mode 100644 cotracker/models/core/__pycache__/__init__.cpython-38.pyc create mode 100644 cotracker/models/core/__pycache__/__init__.cpython-39.pyc create mode 100644 cotracker/models/core/__pycache__/embeddings.cpython-38.pyc create mode 100644 cotracker/models/core/__pycache__/embeddings.cpython-39.pyc create mode 100644 cotracker/models/core/__pycache__/model_utils.cpython-38.pyc create mode 100644 cotracker/models/core/__pycache__/model_utils.cpython-39.pyc create mode 100644 cotracker/models/core/cotracker/__pycache__/__init__.cpython-38.pyc create mode 100644 cotracker/models/core/cotracker/__pycache__/__init__.cpython-39.pyc create mode 100644 cotracker/models/core/cotracker/__pycache__/blocks.cpython-38.pyc create mode 100644 cotracker/models/core/cotracker/__pycache__/blocks.cpython-39.pyc create mode 100644 cotracker/models/core/cotracker/__pycache__/cotracker.cpython-38.pyc create mode 100644 cotracker/models/core/cotracker/__pycache__/cotracker.cpython-39.pyc create mode 100644 cotracker/utils/__pycache__/__init__.cpython-38.pyc create mode 100644 cotracker/utils/__pycache__/__init__.cpython-39.pyc create mode 100644 cotracker/utils/__pycache__/visualizer.cpython-38.pyc create mode 100644 cotracker/utils/__pycache__/visualizer.cpython-39.pyc diff --git a/cotracker/__init__.py b/cotracker/__init__.py index 5277f46..4547e07 100644 --- a/cotracker/__init__.py +++ b/cotracker/__init__.py @@ -1,5 +1,5 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
diff --git a/cotracker/__pycache__/__init__.cpython-38.pyc b/cotracker/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..359e6a79e3a7164017a4aacd0db4ac0fe5f7fe83 GIT binary patch literal 125 zcmWIL<>g`k0>-S$v{OL(F^Gc<7=auIATDMB5-AM944RC7D;bJF!U*D*l8aSLa(+os mVsdtB5fUptJ~J<~BtBlRpz;=nO>TZlX-=vgNbhGLW&i*OavGrk literal 0 HcmV?d00001 diff --git a/cotracker/__pycache__/__init__.cpython-39.pyc b/cotracker/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..14e03a24f3248904ebff3334b8a7c75e7192ae78 GIT binary patch literal 129 zcmYe~<>g`k0>-S$v{OL(F^Gc<7=auIATDMB5-AM944RC7D;bJF!U*D*ntpCxiGGTH qa(+osVsdtB5fUptJ~J<~BtBlRpz;=nO>TZlX-=vgNdIRbW&i+jHygbG literal 0 HcmV?d00001 diff --git a/cotracker/__pycache__/predictor.cpython-38.pyc b/cotracker/__pycache__/predictor.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..41ef9a6e5e73bf006aacf7a25b72ab07ad8a77b9 GIT binary patch literal 6064 zcmai2O^h5z6|SoOneLvSo&E88{S&ix5-USsjUWmln8b-4Cq%FbiAi8EX^p3Pc4u~W zW>Vd2<5iD9*eEAUK9eBLN(kW_N4Rq001^jIbwvWL!~rRnBqA~2tM2WcT_?_ry1Kgl z>(zVT`(FL9S`8R}ymzznc!RNj(#Q1A#>WLb@iiopN#1A8?9DfYV4Q7QO`B7R=sTNk z)7>mJOXl0^dz*gK=j^8OvIX8 zvZj!;@3Ib;LRz=|rWM%}XNSqT$nLr3DMh8olco0^=}G^#bBi~9IWNnya+@~;xge{u zhFn?B$og&8th~!?)~sUXNA%pq;gxJ5uc=6O+sROUd~)Sndp&AzWUD_{EcNRvqh!4; zrWR`7K+45`7J&csp#4Gc2a@VO4kB3J5qGIy1qG-Q9o2sJnU~J-Qhr2An8br!g#$k zinL?uwH;k=_3v5Qj-!4@2m8BHRg7|vYd;Lz{Z<@@;XU^8qnDn!T5NVIH`MXDRc(3NOyGu&*&8`>{+r|Tw5 zZEl7g)!K~o>^11gjh2$3(U|UFtzDw2^B=8;o{MN0H1m3L2Ql2`g zE5#k_su-8X-rxz8a+HeHO6{!W=t63tr*B$XcS>2yw3K=sfkU#hEi{BWhDp?MBn?Z~ zTN1}Ibw*RQLLY&eZ;rN-D3sA4juM=e#9H+)scsM@*SmC;Zw(lkdYS)!zv zRl7zGv94V2#@%b(em8+Q)dDp#$r0pqsWVh0G#VgYM#AcKZgWBZPbzMn!&i+;J_>^E zc?@mDetkn;%zo|#_qTTsaeO_*dyuT?WeAJ2NH%i zE{`h-p9mO;p9Tv6p^mk~Coq(ry~D>3bgjc4eMS?idkl=>LkOM@LwBcwA&+0^LVj83gkKk=!p zX;nYa%B5D&mjL=wg|3v07R`?5l5$#3X90@d9cx_1S0$axzbfynqpo5!$6wtg<=-Wc zFlMmr#7Q(--nN#{E$gOU=)(ElW#<8{7d@@K{3{kpZM)PYmub+P@#XQ**u zZ_{Sj-g7iG1MR#(5rvZ?eZK09y|F(INMlnzfo`NebSR*c-Lc0!5j|*a0$}gKR|-z zG9Z11%^2)M-MZP8RqP5574;VC1I#=loqScWieqe2-eHppp6bqYopZRZBha>nUDYvy z*-u)hvhhqZ)2k;lli9uUV$Uw^782vBOLn^^$erI2leyG`jk!A(B;?!`s)D_+-u%Jc z)BTp248|Dw3tG&{nejq8mo9AB zz@??+SSoITcmOYZ<0IJ35y{fo$#GfU;Xu{wd`>`0CxN(&kpCmnL7&B2d`FCrKF~hb zJ)Hk+dL&&;kJ8$uz z$0>J~5^`Agrl3|kU7%w8@4x+(e*bx{aY{XbCc1>74C?aBx}4FfVZW6CWRtB?KT^be zbnW>#jucU^7nK?+?X>&DL8OC0bR$gQ?T3nBgk|e<)NpGcwT&ig+G>xsw0&(j?5hTi zWD#BV(K!zLT`=OWQ8!}vIxujbl@ei7r>S(QfU|?lC{|ykYO5a&K=_C%K1tsWcJTC5 z>KyWor76Te#4k>ueu`x97c{@3`9<{&Dko~I`KsoZ)HjiB28J3N@-~J30)*8{)|KYn zrk~S1a2}fZFus!$x`HSE1riq2#R4$>l<;^3e-F6@zVNBIAGiVVKH!VsmnZqj`{KT5 z*M-f`a`CBa32<-miS4+cFgBQlec!cK;49|Dl5mOaP(4E*%jIVH9PI!JB0VTb(2^lR6Y%~WxC%fWxW0M2snY}5f$_kM45`Ud18+-c z4*-9l#t;rGqoUF_kOSHZB2~&}0LAk1oR`=J$g?>_&y~&{13B4T1ft^rPAi8YT7#(( z9s3yDV3QKS6ch>Wi~c1zwlZj28SAyuGS=axWwz`=hvby$h9$TZ`GJ8Qka&rtBnHMe(&V z5-$QeGti$<9#f0`xVu7;_vz2ubrFAGUfnf|Ah+1Cb+sFZovwncP(+yr!fOij z3S#_UgK>r!8#E$noC=V}S0D+5bR>z4NUC3_mZ7($aRnh=l|u3OLSlj5S`!=SNQ5#& zk{nN_=&dzzFdk$L`XGRgWl9abbtTZulm%JY6utc(=&iM3tA8aWLvCG=TaU040i)jw z5`R(#&7PhUBNZgm0m-a^@`1E~@>!{iU0uLPtXHA6JTO-EA!F^2V`c^lQVHm!8QFs7 z)MV)mgcy?<3Xm}~0^zi5LW+8C79qtP&54i#yDVR4TWI~GUGK2MM zAjv*Rs||`eGp>(kWff$U2(FV!aj#b{Q6t5@5%(pE71h(ZjSRbMs~PDczLfzu!M0Iy zL#uFowW_|2qATh=Rggh9?5TkE1rz{^=^BtsQeC75^ZNkbJZUbw3{K~5!jT%bqd3N8 zk9wH~H7?;5%8~KxGpo!wyh>$sM`37_`T^yPPG6$ji-x_SF zKGhzy0%Zu%n^f@@B|oC1AVccM^x33@2+=+XQpS5+r7~l?`v!a(Wq-yKkC1@!fRqHF zI)@35N7$K@nVjg@2MNv`Ox^}TB7)-?g5z4JK#D+usPF%9yCaa#DU`S`O74f-%byDo 
z`5jN;hAywF4J%3wu{G2`ce=Z_&=>Px# literal 0 HcmV?d00001 diff --git a/cotracker/__pycache__/predictor.cpython-39.pyc b/cotracker/__pycache__/predictor.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a8a66d39237b5572e2e042c60625bf41296befea GIT binary patch literal 6036 zcmai2-H#hr6~Fh+r^g>_JDbhsqh%XPS{K+LTD3xiHZ&zI72Os!P-;p?j_-KaUa#%m znN8?wMo1*6Pm%IWTgjG?AaA@P{sA5+;sFWiz9NBE;sL2IZPj-9ojYEycayZX=Fa_^ zbHC2{o!_~wayel5$v^$R^XWGj`xh1Fe>Msi@x)h=NG5roHS;&$5`uBIWi@S1HKOlq zxlMP=YkH<^_5H0vv%uN&Oxn`9$)pppp6CTPtY#5)uJlmn&FV^$zsef^F2-SN4WSG1 zR!b#eyOrq0^(YBP!|ou7!{H!oh3l#-8(c43-R|~f*d8XT)!vL$o4%d@`O`xDCNdFg za><%P*4|+qE`_vi7MfOMkDUpV^~mnI=J6sg@}>8lBYj!8>D=JWf;=XRvUHO-1Gy;6 zvVvSuF39Rl)-1ilY}PDe=ErsY((p??Ti%4+A-a<9bIen?^xQ7qkcyR2g_6y2)n~|Aq?C7Rvd@n9rnp1=eGvQ zd3mq{bGgy}cCC)yRt0>#fF~v?j0fE1KK}IHIeqsIukNpEO>y+U+;Ft_)^?=2Q4AG| z*0;i~R=lYTv!&{)$x@%U!;WfgMY?ts>T|uNWM~wrJ6LafH2CyKGot4bp7^IoQX$zk zQ|D4!@=bv^>#>xlPU=c=%X&j(Ugi%TMJ-3INUhY)dyX!q7QW1TTDQEsXX>SXN8rTl zd0$D@{-)i(;N2C3S) z)|JsvouFZsDLF~WY*g)K_7t9q!HOTC~k0raH|UGa<-)v|h0Op9p^Sn1!gvLZ^Qv_37B-r7K0 z$!JbtZI_gPm$1W_!HyFr(P(AIS~`RGv8E?l32ULPA85M zEr!3u^LDeCt87T?Zr0}Y)>Iu)UiAQ^J%cBP(*w#>48{=7oDp?V<;zs|&xxhIs;-DQ@RC_SBzg3(M}F|S(}6aF1ItqW}McWAD& zY2W7RcZmo##pV;sjXfreGqy=*l({rR*%HlCr%+C{h1Wch7{1FI}*GT)4lOzY|5rVUg&ku0a;2G|JDvX>plYK}{m*2X7gal(PA`T3l}E}aJ6E@A)ANC)36 z-QW|Eow%$2RQGWHwe)zpl%AluOM&rbrQ71Izo4}S%{1Cb=Rq1j)l8#}rf#aAr&+F< zSQ}PnP_nwFeR5`lsHLvl5!xmmqsy&+cYOd0R)a{z>T57m1EK0+$5F%Wfz&p-tZS=1+Sc~f;jphBqc1Ju z%msWLhy5;S@wezRqWL;7;GWkKy;5hXc6kP0hj~z}R;k(QM+1;NVu=KnSkxeS@(Fbw z`Nr}b!XIH1XV5-JDfqLRU(x)MdYtN?AVGXh^XJr4$TkDRj}29uLwf+ zC3RppH1bhAC#Q22Pppuzpeh!vW&WV>d5M=SAGt-o__^2%ynxriC6~Y^&+s#QV$Zj$ z!sh3=_}sPJ&uq^HSt;$g)+&5LT`UV1l*ZrlEtj9gH)ruj*{$*wUbW6yb+kW*-aCyi z@(PDI*ddZbCp{xM%ZB8PLH9uwK>RZYKum8pb$TE(Fb^1tAuV}o0Bjlk0nvB07(!tg zQff)tfC^|Sh?19&01D+#bADnQsLsa_Jy$xn45;K|5lm(Q+|r!GFfF2EpJbbC>;W!8 zf#9I%@4J&kv$u=6LqguQ|UlQSH7P^CLp<&BS zSn9QKs)fyf3OVIyR`Wh{Ou0$llbQl!5Pcj~)*qwg&v;_u(K-2eQY?u8iutfu6(xRw z?|l7>6zYhb0+mdt=d9%sgxRFa0I9D+JR+iLXf&;^P$W1&+ymGCw{Ye)Tl7|t!72)&{eE28-X zKZbnyyi#xjM7jC@Ls76{VkV0CuyQVVWY+Rg@Vfc3jV}-VNE_&9qUKqYU%W?@Uy3Nk zM|_V6-+#}R1sUWKzKP-k#9(E;Jlpt}2jnYevAo&i`-kioF@rA)*+Iz=BAx<~Lby&r zJ5dXNT$lyz)|zo5xiSZe7-W7D1m7k|S4GMZZZw1I-GLll4|9A5@6DB+f@f8w#O(Mi zU^9aLhWaCsb%`SDhwiuPS$utYZP!eK9ACrMia)~ zafbRDG$Qhw3edtAu@eZ{XeV;2sD7JThQNAR2_ajVLhbmXA+Xli0xA+^JcNdG1lAho z)EE>{3ZP?=(kX!jnwh#FubU&VKLUZZHf{B9T&?WWSC6n00ioY3B>p%6k)9tDl4dm0 z0gbGH)Pa_O)LE&ERb4cfG({pNpu&e`mA*jMfgCe3P>^as7>#5Lnp2kEs|X^-6$*ec zG6LPSXo7}?UKK$@jmAXK0D3KhY0zN$(q$5;6L!J8u|pkmcqh=TPB_8Z3W>ihlt@)^b`!j4B6kf^DPZhB)D_YE69? zRaevnY9NDd*mD8vv#0IVSdJY#OQ3{Fq`gd;UOavLnj<`uL*2+kvNnEe!2H3Io5LlS$!b3fWyxi20}FKlK$=kl8B z(S!*XE;ov*MFpZ_<~sJkk0?oX#%)BD>rir*l66WB)@xA9_=ycPe1IpW%LC&l?3p+C zSXk2qe4?mdqijyIdCXj6tPTdNo$W!JE-_nuEd~P}9F)zWHXoW@#;%$R&3F|tK$Pc0 zTz~dsbBkfrrMYrf=OcH|`3W70_ _X: - """ - Loads to a @dataclass or collection hierarchy including dataclasses - from a json recursively. - Call it like load_dataclass(f, typing.List[FrameAnnotationAnnotation]). - raises KeyError if json has keys not mapping to the dataclass fields. - - Args: - f: Either a path to a file, or a file opened for writing. - cls: The class of the loaded dataclass. - binary: Set to True if `f` is a file handle, else False. 
- """ - if binary: - asdict = json.loads(f.read().decode("utf8")) - else: - asdict = json.load(f) - - # in the list case, run a faster "vectorized" version - cls = get_args(cls)[0] - res = list(_dataclass_list_from_dict_list(asdict, cls)) - - return res - - -def _resolve_optional(type_: Any) -> Tuple[bool, Any]: - """Check whether `type_` is equivalent to `typing.Optional[T]` for some T.""" - if get_origin(type_) is Union: - args = get_args(type_) - if len(args) == 2 and args[1] == type(None): # noqa E721 - return True, args[0] - if type_ is Any: - return True, Any - - return False, type_ - - -def _unwrap_type(tp): - # strips Optional wrapper, if any - if get_origin(tp) is Union: - args = get_args(tp) - if len(args) == 2 and any(a is type(None) for a in args): # noqa: E721 - # this is typing.Optional - return args[0] if args[1] is type(None) else args[1] # noqa: E721 - return tp - - -def _get_dataclass_field_default(field: Field) -> Any: - if field.default_factory is not MISSING: - # pyre-fixme[29]: `Union[dataclasses._MISSING_TYPE, - # dataclasses._DefaultFactory[typing.Any]]` is not a function. - return field.default_factory() - elif field.default is not MISSING: - return field.default - else: - return None - - -def _dataclass_list_from_dict_list(dlist, typeannot): - """ - Vectorised version of `_dataclass_from_dict`. - The output should be equivalent to - `[_dataclass_from_dict(d, typeannot) for d in dlist]`. - - Args: - dlist: list of objects to convert. - typeannot: type of each of those objects. - Returns: - iterator or list over converted objects of the same length as `dlist`. - - Raises: - ValueError: it assumes the objects have None's in consistent places across - objects, otherwise it would ignore some values. This generally holds for - auto-generated annotations, but otherwise use `_dataclass_from_dict`. 
- """ - - cls = get_origin(typeannot) or typeannot - - if typeannot is Any: - return dlist - if all(obj is None for obj in dlist): # 1st recursion base: all None nodes - return dlist - if any(obj is None for obj in dlist): - # filter out Nones and recurse on the resulting list - idx_notnone = [(i, obj) for i, obj in enumerate(dlist) if obj is not None] - idx, notnone = zip(*idx_notnone) - converted = _dataclass_list_from_dict_list(notnone, typeannot) - res = [None] * len(dlist) - for i, obj in zip(idx, converted): - res[i] = obj - return res - - is_optional, contained_type = _resolve_optional(typeannot) - if is_optional: - return _dataclass_list_from_dict_list(dlist, contained_type) - - # otherwise, we dispatch by the type of the provided annotation to convert to - if issubclass(cls, tuple) and hasattr(cls, "_fields"): # namedtuple - # For namedtuple, call the function recursively on the lists of corresponding keys - types = cls.__annotations__.values() - dlist_T = zip(*dlist) - res_T = [ - _dataclass_list_from_dict_list(key_list, tp) for key_list, tp in zip(dlist_T, types) - ] - return [cls(*converted_as_tuple) for converted_as_tuple in zip(*res_T)] - elif issubclass(cls, (list, tuple)): - # For list/tuple, call the function recursively on the lists of corresponding positions - types = get_args(typeannot) - if len(types) == 1: # probably List; replicate for all items - types = types * len(dlist[0]) - dlist_T = zip(*dlist) - res_T = ( - _dataclass_list_from_dict_list(pos_list, tp) for pos_list, tp in zip(dlist_T, types) - ) - if issubclass(cls, tuple): - return list(zip(*res_T)) - else: - return [cls(converted_as_tuple) for converted_as_tuple in zip(*res_T)] - elif issubclass(cls, dict): - # For the dictionary, call the function recursively on concatenated keys and vertices - key_t, val_t = get_args(typeannot) - all_keys_res = _dataclass_list_from_dict_list( - [k for obj in dlist for k in obj.keys()], key_t - ) - all_vals_res = _dataclass_list_from_dict_list( - [k for obj in dlist for k in obj.values()], val_t - ) - indices = np.cumsum([len(obj) for obj in dlist]) - assert indices[-1] == len(all_keys_res) - - keys = np.split(list(all_keys_res), indices[:-1]) - all_vals_res_iter = iter(all_vals_res) - return [cls(zip(k, all_vals_res_iter)) for k in keys] - elif not dataclasses.is_dataclass(typeannot): - return dlist - - # dataclass node: 2nd recursion base; call the function recursively on the lists - # of the corresponding fields - assert dataclasses.is_dataclass(cls) - fieldtypes = { - f.name: (_unwrap_type(f.type), _get_dataclass_field_default(f)) - for f in dataclasses.fields(typeannot) - } - - # NOTE the default object is shared here - key_lists = ( - _dataclass_list_from_dict_list([obj.get(k, default) for obj in dlist], type_) - for k, (type_, default) in fieldtypes.items() - ) - transposed = zip(*key_lists) - return [cls(*vals_as_tuple) for vals_as_tuple in transposed] +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + + +import json +import dataclasses +import numpy as np +from dataclasses import Field, MISSING +from typing import IO, TypeVar, Type, get_args, get_origin, Union, Any, Tuple + +_X = TypeVar("_X") + + +def load_dataclass(f: IO, cls: Type[_X], binary: bool = False) -> _X: + """ + Loads to a @dataclass or collection hierarchy including dataclasses + from a json recursively. 
+ Call it like load_dataclass(f, typing.List[FrameAnnotationAnnotation]). + raises KeyError if json has keys not mapping to the dataclass fields. + + Args: + f: Either a path to a file, or a file opened for writing. + cls: The class of the loaded dataclass. + binary: Set to True if `f` is a file handle, else False. + """ + if binary: + asdict = json.loads(f.read().decode("utf8")) + else: + asdict = json.load(f) + + # in the list case, run a faster "vectorized" version + cls = get_args(cls)[0] + res = list(_dataclass_list_from_dict_list(asdict, cls)) + + return res + + +def _resolve_optional(type_: Any) -> Tuple[bool, Any]: + """Check whether `type_` is equivalent to `typing.Optional[T]` for some T.""" + if get_origin(type_) is Union: + args = get_args(type_) + if len(args) == 2 and args[1] == type(None): # noqa E721 + return True, args[0] + if type_ is Any: + return True, Any + + return False, type_ + + +def _unwrap_type(tp): + # strips Optional wrapper, if any + if get_origin(tp) is Union: + args = get_args(tp) + if len(args) == 2 and any(a is type(None) for a in args): # noqa: E721 + # this is typing.Optional + return args[0] if args[1] is type(None) else args[1] # noqa: E721 + return tp + + +def _get_dataclass_field_default(field: Field) -> Any: + if field.default_factory is not MISSING: + # pyre-fixme[29]: `Union[dataclasses._MISSING_TYPE, + # dataclasses._DefaultFactory[typing.Any]]` is not a function. + return field.default_factory() + elif field.default is not MISSING: + return field.default + else: + return None + + +def _dataclass_list_from_dict_list(dlist, typeannot): + """ + Vectorised version of `_dataclass_from_dict`. + The output should be equivalent to + `[_dataclass_from_dict(d, typeannot) for d in dlist]`. + + Args: + dlist: list of objects to convert. + typeannot: type of each of those objects. + Returns: + iterator or list over converted objects of the same length as `dlist`. + + Raises: + ValueError: it assumes the objects have None's in consistent places across + objects, otherwise it would ignore some values. This generally holds for + auto-generated annotations, but otherwise use `_dataclass_from_dict`. 
+ """ + + cls = get_origin(typeannot) or typeannot + + if typeannot is Any: + return dlist + if all(obj is None for obj in dlist): # 1st recursion base: all None nodes + return dlist + if any(obj is None for obj in dlist): + # filter out Nones and recurse on the resulting list + idx_notnone = [(i, obj) for i, obj in enumerate(dlist) if obj is not None] + idx, notnone = zip(*idx_notnone) + converted = _dataclass_list_from_dict_list(notnone, typeannot) + res = [None] * len(dlist) + for i, obj in zip(idx, converted): + res[i] = obj + return res + + is_optional, contained_type = _resolve_optional(typeannot) + if is_optional: + return _dataclass_list_from_dict_list(dlist, contained_type) + + # otherwise, we dispatch by the type of the provided annotation to convert to + if issubclass(cls, tuple) and hasattr(cls, "_fields"): # namedtuple + # For namedtuple, call the function recursively on the lists of corresponding keys + types = cls.__annotations__.values() + dlist_T = zip(*dlist) + res_T = [ + _dataclass_list_from_dict_list(key_list, tp) for key_list, tp in zip(dlist_T, types) + ] + return [cls(*converted_as_tuple) for converted_as_tuple in zip(*res_T)] + elif issubclass(cls, (list, tuple)): + # For list/tuple, call the function recursively on the lists of corresponding positions + types = get_args(typeannot) + if len(types) == 1: # probably List; replicate for all items + types = types * len(dlist[0]) + dlist_T = zip(*dlist) + res_T = ( + _dataclass_list_from_dict_list(pos_list, tp) for pos_list, tp in zip(dlist_T, types) + ) + if issubclass(cls, tuple): + return list(zip(*res_T)) + else: + return [cls(converted_as_tuple) for converted_as_tuple in zip(*res_T)] + elif issubclass(cls, dict): + # For the dictionary, call the function recursively on concatenated keys and vertices + key_t, val_t = get_args(typeannot) + all_keys_res = _dataclass_list_from_dict_list( + [k for obj in dlist for k in obj.keys()], key_t + ) + all_vals_res = _dataclass_list_from_dict_list( + [k for obj in dlist for k in obj.values()], val_t + ) + indices = np.cumsum([len(obj) for obj in dlist]) + assert indices[-1] == len(all_keys_res) + + keys = np.split(list(all_keys_res), indices[:-1]) + all_vals_res_iter = iter(all_vals_res) + return [cls(zip(k, all_vals_res_iter)) for k in keys] + elif not dataclasses.is_dataclass(typeannot): + return dlist + + # dataclass node: 2nd recursion base; call the function recursively on the lists + # of the corresponding fields + assert dataclasses.is_dataclass(cls) + fieldtypes = { + f.name: (_unwrap_type(f.type), _get_dataclass_field_default(f)) + for f in dataclasses.fields(typeannot) + } + + # NOTE the default object is shared here + key_lists = ( + _dataclass_list_from_dict_list([obj.get(k, default) for obj in dlist], type_) + for k, (type_, default) in fieldtypes.items() + ) + transposed = zip(*key_lists) + return [cls(*vals_as_tuple) for vals_as_tuple in transposed] diff --git a/cotracker/datasets/dr_dataset.py b/cotracker/datasets/dr_dataset.py index 70af653..9a31884 100644 --- a/cotracker/datasets/dr_dataset.py +++ b/cotracker/datasets/dr_dataset.py @@ -1,161 +1,161 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
- - -import os -import gzip -import torch -import numpy as np -import torch.utils.data as data -from collections import defaultdict -from dataclasses import dataclass -from typing import List, Optional, Any, Dict, Tuple - -from cotracker.datasets.utils import CoTrackerData -from cotracker.datasets.dataclass_utils import load_dataclass - - -@dataclass -class ImageAnnotation: - # path to jpg file, relative w.r.t. dataset_root - path: str - # H x W - size: Tuple[int, int] - - -@dataclass -class DynamicReplicaFrameAnnotation: - """A dataclass used to load annotations from json.""" - - # can be used to join with `SequenceAnnotation` - sequence_name: str - # 0-based, continuous frame number within sequence - frame_number: int - # timestamp in seconds from the video start - frame_timestamp: float - - image: ImageAnnotation - meta: Optional[Dict[str, Any]] = None - - camera_name: Optional[str] = None - trajectories: Optional[str] = None - - -class DynamicReplicaDataset(data.Dataset): - def __init__( - self, - root, - split="valid", - traj_per_sample=256, - crop_size=None, - sample_len=-1, - only_first_n_samples=-1, - rgbd_input=False, - ): - super(DynamicReplicaDataset, self).__init__() - self.root = root - self.sample_len = sample_len - self.split = split - self.traj_per_sample = traj_per_sample - self.rgbd_input = rgbd_input - self.crop_size = crop_size - frame_annotations_file = f"frame_annotations_{split}.jgz" - self.sample_list = [] - with gzip.open( - os.path.join(root, split, frame_annotations_file), "rt", encoding="utf8" - ) as zipfile: - frame_annots_list = load_dataclass(zipfile, List[DynamicReplicaFrameAnnotation]) - seq_annot = defaultdict(list) - for frame_annot in frame_annots_list: - if frame_annot.camera_name == "left": - seq_annot[frame_annot.sequence_name].append(frame_annot) - - for seq_name in seq_annot.keys(): - seq_len = len(seq_annot[seq_name]) - - step = self.sample_len if self.sample_len > 0 else seq_len - counter = 0 - - for ref_idx in range(0, seq_len, step): - sample = seq_annot[seq_name][ref_idx : ref_idx + step] - self.sample_list.append(sample) - counter += 1 - if only_first_n_samples > 0 and counter >= only_first_n_samples: - break - - def __len__(self): - return len(self.sample_list) - - def crop(self, rgbs, trajs): - T, N, _ = trajs.shape - - S = len(rgbs) - H, W = rgbs[0].shape[:2] - assert S == T - - H_new = H - W_new = W - - # simple random crop - y0 = 0 if self.crop_size[0] >= H_new else (H_new - self.crop_size[0]) // 2 - x0 = 0 if self.crop_size[1] >= W_new else (W_new - self.crop_size[1]) // 2 - rgbs = [rgb[y0 : y0 + self.crop_size[0], x0 : x0 + self.crop_size[1]] for rgb in rgbs] - - trajs[:, :, 0] -= x0 - trajs[:, :, 1] -= y0 - - return rgbs, trajs - - def __getitem__(self, index): - sample = self.sample_list[index] - T = len(sample) - rgbs, visibilities, traj_2d = [], [], [] - - H, W = sample[0].image.size - image_size = (H, W) - - for i in range(T): - traj_path = os.path.join(self.root, self.split, sample[i].trajectories["path"]) - traj = torch.load(traj_path) - - visibilities.append(traj["verts_inds_vis"].numpy()) - - rgbs.append(traj["img"].numpy()) - traj_2d.append(traj["traj_2d"].numpy()[..., :2]) - - traj_2d = np.stack(traj_2d) - visibility = np.stack(visibilities) - T, N, D = traj_2d.shape - # subsample trajectories for augmentations - visible_inds_sampled = torch.randperm(N)[: self.traj_per_sample] - - traj_2d = traj_2d[:, visible_inds_sampled] - visibility = visibility[:, visible_inds_sampled] - - if self.crop_size is not None: - rgbs, traj_2d = 
self.crop(rgbs, traj_2d) - H, W, _ = rgbs[0].shape - image_size = self.crop_size - - visibility[traj_2d[:, :, 0] > image_size[1] - 1] = False - visibility[traj_2d[:, :, 0] < 0] = False - visibility[traj_2d[:, :, 1] > image_size[0] - 1] = False - visibility[traj_2d[:, :, 1] < 0] = False - - # filter out points that're visible for less than 10 frames - visible_inds_resampled = visibility.sum(0) > 10 - traj_2d = torch.from_numpy(traj_2d[:, visible_inds_resampled]) - visibility = torch.from_numpy(visibility[:, visible_inds_resampled]) - - rgbs = np.stack(rgbs, 0) - video = torch.from_numpy(rgbs).reshape(T, H, W, 3).permute(0, 3, 1, 2).float() - return CoTrackerData( - video=video, - trajectory=traj_2d, - visibility=visibility, - valid=torch.ones(T, N), - seq_name=sample[0].sequence_name, - ) +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + + +import os +import gzip +import torch +import numpy as np +import torch.utils.data as data +from collections import defaultdict +from dataclasses import dataclass +from typing import List, Optional, Any, Dict, Tuple + +from cotracker.datasets.utils import CoTrackerData +from cotracker.datasets.dataclass_utils import load_dataclass + + +@dataclass +class ImageAnnotation: + # path to jpg file, relative w.r.t. dataset_root + path: str + # H x W + size: Tuple[int, int] + + +@dataclass +class DynamicReplicaFrameAnnotation: + """A dataclass used to load annotations from json.""" + + # can be used to join with `SequenceAnnotation` + sequence_name: str + # 0-based, continuous frame number within sequence + frame_number: int + # timestamp in seconds from the video start + frame_timestamp: float + + image: ImageAnnotation + meta: Optional[Dict[str, Any]] = None + + camera_name: Optional[str] = None + trajectories: Optional[str] = None + + +class DynamicReplicaDataset(data.Dataset): + def __init__( + self, + root, + split="valid", + traj_per_sample=256, + crop_size=None, + sample_len=-1, + only_first_n_samples=-1, + rgbd_input=False, + ): + super(DynamicReplicaDataset, self).__init__() + self.root = root + self.sample_len = sample_len + self.split = split + self.traj_per_sample = traj_per_sample + self.rgbd_input = rgbd_input + self.crop_size = crop_size + frame_annotations_file = f"frame_annotations_{split}.jgz" + self.sample_list = [] + with gzip.open( + os.path.join(root, split, frame_annotations_file), "rt", encoding="utf8" + ) as zipfile: + frame_annots_list = load_dataclass(zipfile, List[DynamicReplicaFrameAnnotation]) + seq_annot = defaultdict(list) + for frame_annot in frame_annots_list: + if frame_annot.camera_name == "left": + seq_annot[frame_annot.sequence_name].append(frame_annot) + + for seq_name in seq_annot.keys(): + seq_len = len(seq_annot[seq_name]) + + step = self.sample_len if self.sample_len > 0 else seq_len + counter = 0 + + for ref_idx in range(0, seq_len, step): + sample = seq_annot[seq_name][ref_idx : ref_idx + step] + self.sample_list.append(sample) + counter += 1 + if only_first_n_samples > 0 and counter >= only_first_n_samples: + break + + def __len__(self): + return len(self.sample_list) + + def crop(self, rgbs, trajs): + T, N, _ = trajs.shape + + S = len(rgbs) + H, W = rgbs[0].shape[:2] + assert S == T + + H_new = H + W_new = W + + # simple random crop + y0 = 0 if self.crop_size[0] >= H_new else (H_new - self.crop_size[0]) // 2 + x0 = 0 if self.crop_size[1] >= W_new else (W_new - 
self.crop_size[1]) // 2 + rgbs = [rgb[y0 : y0 + self.crop_size[0], x0 : x0 + self.crop_size[1]] for rgb in rgbs] + + trajs[:, :, 0] -= x0 + trajs[:, :, 1] -= y0 + + return rgbs, trajs + + def __getitem__(self, index): + sample = self.sample_list[index] + T = len(sample) + rgbs, visibilities, traj_2d = [], [], [] + + H, W = sample[0].image.size + image_size = (H, W) + + for i in range(T): + traj_path = os.path.join(self.root, self.split, sample[i].trajectories["path"]) + traj = torch.load(traj_path) + + visibilities.append(traj["verts_inds_vis"].numpy()) + + rgbs.append(traj["img"].numpy()) + traj_2d.append(traj["traj_2d"].numpy()[..., :2]) + + traj_2d = np.stack(traj_2d) + visibility = np.stack(visibilities) + T, N, D = traj_2d.shape + # subsample trajectories for augmentations + visible_inds_sampled = torch.randperm(N)[: self.traj_per_sample] + + traj_2d = traj_2d[:, visible_inds_sampled] + visibility = visibility[:, visible_inds_sampled] + + if self.crop_size is not None: + rgbs, traj_2d = self.crop(rgbs, traj_2d) + H, W, _ = rgbs[0].shape + image_size = self.crop_size + + visibility[traj_2d[:, :, 0] > image_size[1] - 1] = False + visibility[traj_2d[:, :, 0] < 0] = False + visibility[traj_2d[:, :, 1] > image_size[0] - 1] = False + visibility[traj_2d[:, :, 1] < 0] = False + + # filter out points that're visible for less than 10 frames + visible_inds_resampled = visibility.sum(0) > 10 + traj_2d = torch.from_numpy(traj_2d[:, visible_inds_resampled]) + visibility = torch.from_numpy(visibility[:, visible_inds_resampled]) + + rgbs = np.stack(rgbs, 0) + video = torch.from_numpy(rgbs).reshape(T, H, W, 3).permute(0, 3, 1, 2).float() + return CoTrackerData( + video=video, + trajectory=traj_2d, + visibility=visibility, + valid=torch.ones(T, N), + seq_name=sample[0].sequence_name, + ) diff --git a/cotracker/datasets/kubric_movif_dataset.py b/cotracker/datasets/kubric_movif_dataset.py index 366d738..68ce73c 100644 --- a/cotracker/datasets/kubric_movif_dataset.py +++ b/cotracker/datasets/kubric_movif_dataset.py @@ -1,441 +1,441 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
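The DynamicReplicaDataset re-added above returns CoTrackerData samples. Below is a minimal usage sketch, assuming a local copy of the dataset at a placeholder root containing the gzipped frame annotations and the per-frame trajectory files they reference.

from cotracker.datasets.dr_dataset import DynamicReplicaDataset

# Placeholder root; expects <root>/<split>/frame_annotations_<split>.jgz plus the trajectory files it points to.
dataset = DynamicReplicaDataset(
    root="./dynamic_replica_data",
    split="valid",
    traj_per_sample=256,
    crop_size=(384, 512),
)

sample = dataset[0]
# video: (T, 3, H, W) float tensor, trajectory: (T, N, 2), visibility: (T, N)
print(sample.video.shape, sample.trajectory.shape, sample.visibility.shape, sample.seq_name)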
- -import os -import torch -import cv2 - -import imageio -import numpy as np - -from cotracker.datasets.utils import CoTrackerData -from torchvision.transforms import ColorJitter, GaussianBlur -from PIL import Image - - -class CoTrackerDataset(torch.utils.data.Dataset): - def __init__( - self, - data_root, - crop_size=(384, 512), - seq_len=24, - traj_per_sample=768, - sample_vis_1st_frame=False, - use_augs=False, - ): - super(CoTrackerDataset, self).__init__() - np.random.seed(0) - torch.manual_seed(0) - self.data_root = data_root - self.seq_len = seq_len - self.traj_per_sample = traj_per_sample - self.sample_vis_1st_frame = sample_vis_1st_frame - self.use_augs = use_augs - self.crop_size = crop_size - - # photometric augmentation - self.photo_aug = ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2, hue=0.25 / 3.14) - self.blur_aug = GaussianBlur(11, sigma=(0.1, 2.0)) - - self.blur_aug_prob = 0.25 - self.color_aug_prob = 0.25 - - # occlusion augmentation - self.eraser_aug_prob = 0.5 - self.eraser_bounds = [2, 100] - self.eraser_max = 10 - - # occlusion augmentation - self.replace_aug_prob = 0.5 - self.replace_bounds = [2, 100] - self.replace_max = 10 - - # spatial augmentations - self.pad_bounds = [0, 100] - self.crop_size = crop_size - self.resize_lim = [0.25, 2.0] # sample resizes from here - self.resize_delta = 0.2 - self.max_crop_offset = 50 - - self.do_flip = True - self.h_flip_prob = 0.5 - self.v_flip_prob = 0.5 - - def getitem_helper(self, index): - return NotImplementedError - - def __getitem__(self, index): - gotit = False - - sample, gotit = self.getitem_helper(index) - if not gotit: - print("warning: sampling failed") - # fake sample, so we can still collate - sample = CoTrackerData( - video=torch.zeros((self.seq_len, 3, self.crop_size[0], self.crop_size[1])), - trajectory=torch.zeros((self.seq_len, self.traj_per_sample, 2)), - visibility=torch.zeros((self.seq_len, self.traj_per_sample)), - valid=torch.zeros((self.seq_len, self.traj_per_sample)), - ) - - return sample, gotit - - def add_photometric_augs(self, rgbs, trajs, visibles, eraser=True, replace=True): - T, N, _ = trajs.shape - - S = len(rgbs) - H, W = rgbs[0].shape[:2] - assert S == T - - if eraser: - ############ eraser transform (per image after the first) ############ - rgbs = [rgb.astype(np.float32) for rgb in rgbs] - for i in range(1, S): - if np.random.rand() < self.eraser_aug_prob: - for _ in range( - np.random.randint(1, self.eraser_max + 1) - ): # number of times to occlude - xc = np.random.randint(0, W) - yc = np.random.randint(0, H) - dx = np.random.randint(self.eraser_bounds[0], self.eraser_bounds[1]) - dy = np.random.randint(self.eraser_bounds[0], self.eraser_bounds[1]) - x0 = np.clip(xc - dx / 2, 0, W - 1).round().astype(np.int32) - x1 = np.clip(xc + dx / 2, 0, W - 1).round().astype(np.int32) - y0 = np.clip(yc - dy / 2, 0, H - 1).round().astype(np.int32) - y1 = np.clip(yc + dy / 2, 0, H - 1).round().astype(np.int32) - - mean_color = np.mean(rgbs[i][y0:y1, x0:x1, :].reshape(-1, 3), axis=0) - rgbs[i][y0:y1, x0:x1, :] = mean_color - - occ_inds = np.logical_and( - np.logical_and(trajs[i, :, 0] >= x0, trajs[i, :, 0] < x1), - np.logical_and(trajs[i, :, 1] >= y0, trajs[i, :, 1] < y1), - ) - visibles[i, occ_inds] = 0 - rgbs = [rgb.astype(np.uint8) for rgb in rgbs] - - if replace: - rgbs_alt = [ - np.array(self.photo_aug(Image.fromarray(rgb)), dtype=np.uint8) for rgb in rgbs - ] - rgbs_alt = [ - np.array(self.photo_aug(Image.fromarray(rgb)), dtype=np.uint8) for rgb in rgbs_alt - ] - - ############ replace 
transform (per image after the first) ############ - rgbs = [rgb.astype(np.float32) for rgb in rgbs] - rgbs_alt = [rgb.astype(np.float32) for rgb in rgbs_alt] - for i in range(1, S): - if np.random.rand() < self.replace_aug_prob: - for _ in range( - np.random.randint(1, self.replace_max + 1) - ): # number of times to occlude - xc = np.random.randint(0, W) - yc = np.random.randint(0, H) - dx = np.random.randint(self.replace_bounds[0], self.replace_bounds[1]) - dy = np.random.randint(self.replace_bounds[0], self.replace_bounds[1]) - x0 = np.clip(xc - dx / 2, 0, W - 1).round().astype(np.int32) - x1 = np.clip(xc + dx / 2, 0, W - 1).round().astype(np.int32) - y0 = np.clip(yc - dy / 2, 0, H - 1).round().astype(np.int32) - y1 = np.clip(yc + dy / 2, 0, H - 1).round().astype(np.int32) - - wid = x1 - x0 - hei = y1 - y0 - y00 = np.random.randint(0, H - hei) - x00 = np.random.randint(0, W - wid) - fr = np.random.randint(0, S) - rep = rgbs_alt[fr][y00 : y00 + hei, x00 : x00 + wid, :] - rgbs[i][y0:y1, x0:x1, :] = rep - - occ_inds = np.logical_and( - np.logical_and(trajs[i, :, 0] >= x0, trajs[i, :, 0] < x1), - np.logical_and(trajs[i, :, 1] >= y0, trajs[i, :, 1] < y1), - ) - visibles[i, occ_inds] = 0 - rgbs = [rgb.astype(np.uint8) for rgb in rgbs] - - ############ photometric augmentation ############ - if np.random.rand() < self.color_aug_prob: - # random per-frame amount of aug - rgbs = [np.array(self.photo_aug(Image.fromarray(rgb)), dtype=np.uint8) for rgb in rgbs] - - if np.random.rand() < self.blur_aug_prob: - # random per-frame amount of blur - rgbs = [np.array(self.blur_aug(Image.fromarray(rgb)), dtype=np.uint8) for rgb in rgbs] - - return rgbs, trajs, visibles - - def add_spatial_augs(self, rgbs, trajs, visibles): - T, N, __ = trajs.shape - - S = len(rgbs) - H, W = rgbs[0].shape[:2] - assert S == T - - rgbs = [rgb.astype(np.float32) for rgb in rgbs] - - ############ spatial transform ############ - - # padding - pad_x0 = np.random.randint(self.pad_bounds[0], self.pad_bounds[1]) - pad_x1 = np.random.randint(self.pad_bounds[0], self.pad_bounds[1]) - pad_y0 = np.random.randint(self.pad_bounds[0], self.pad_bounds[1]) - pad_y1 = np.random.randint(self.pad_bounds[0], self.pad_bounds[1]) - - rgbs = [np.pad(rgb, ((pad_y0, pad_y1), (pad_x0, pad_x1), (0, 0))) for rgb in rgbs] - trajs[:, :, 0] += pad_x0 - trajs[:, :, 1] += pad_y0 - H, W = rgbs[0].shape[:2] - - # scaling + stretching - scale = np.random.uniform(self.resize_lim[0], self.resize_lim[1]) - scale_x = scale - scale_y = scale - H_new = H - W_new = W - - scale_delta_x = 0.0 - scale_delta_y = 0.0 - - rgbs_scaled = [] - for s in range(S): - if s == 1: - scale_delta_x = np.random.uniform(-self.resize_delta, self.resize_delta) - scale_delta_y = np.random.uniform(-self.resize_delta, self.resize_delta) - elif s > 1: - scale_delta_x = ( - scale_delta_x * 0.8 - + np.random.uniform(-self.resize_delta, self.resize_delta) * 0.2 - ) - scale_delta_y = ( - scale_delta_y * 0.8 - + np.random.uniform(-self.resize_delta, self.resize_delta) * 0.2 - ) - scale_x = scale_x + scale_delta_x - scale_y = scale_y + scale_delta_y - - # bring h/w closer - scale_xy = (scale_x + scale_y) * 0.5 - scale_x = scale_x * 0.5 + scale_xy * 0.5 - scale_y = scale_y * 0.5 + scale_xy * 0.5 - - # don't get too crazy - scale_x = np.clip(scale_x, 0.2, 2.0) - scale_y = np.clip(scale_y, 0.2, 2.0) - - H_new = int(H * scale_y) - W_new = int(W * scale_x) - - # make it at least slightly bigger than the crop area, - # so that the random cropping can add diversity - H_new = np.clip(H_new, 
self.crop_size[0] + 10, None) - W_new = np.clip(W_new, self.crop_size[1] + 10, None) - # recompute scale in case we clipped - scale_x = (W_new - 1) / float(W - 1) - scale_y = (H_new - 1) / float(H - 1) - rgbs_scaled.append(cv2.resize(rgbs[s], (W_new, H_new), interpolation=cv2.INTER_LINEAR)) - trajs[s, :, 0] *= scale_x - trajs[s, :, 1] *= scale_y - rgbs = rgbs_scaled - - ok_inds = visibles[0, :] > 0 - vis_trajs = trajs[:, ok_inds] # S,?,2 - - if vis_trajs.shape[1] > 0: - mid_x = np.mean(vis_trajs[0, :, 0]) - mid_y = np.mean(vis_trajs[0, :, 1]) - else: - mid_y = self.crop_size[0] - mid_x = self.crop_size[1] - - x0 = int(mid_x - self.crop_size[1] // 2) - y0 = int(mid_y - self.crop_size[0] // 2) - - offset_x = 0 - offset_y = 0 - - for s in range(S): - # on each frame, shift a bit more - if s == 1: - offset_x = np.random.randint(-self.max_crop_offset, self.max_crop_offset) - offset_y = np.random.randint(-self.max_crop_offset, self.max_crop_offset) - elif s > 1: - offset_x = int( - offset_x * 0.8 - + np.random.randint(-self.max_crop_offset, self.max_crop_offset + 1) * 0.2 - ) - offset_y = int( - offset_y * 0.8 - + np.random.randint(-self.max_crop_offset, self.max_crop_offset + 1) * 0.2 - ) - x0 = x0 + offset_x - y0 = y0 + offset_y - - H_new, W_new = rgbs[s].shape[:2] - if H_new == self.crop_size[0]: - y0 = 0 - else: - y0 = min(max(0, y0), H_new - self.crop_size[0] - 1) - - if W_new == self.crop_size[1]: - x0 = 0 - else: - x0 = min(max(0, x0), W_new - self.crop_size[1] - 1) - - rgbs[s] = rgbs[s][y0 : y0 + self.crop_size[0], x0 : x0 + self.crop_size[1]] - trajs[s, :, 0] -= x0 - trajs[s, :, 1] -= y0 - - H_new = self.crop_size[0] - W_new = self.crop_size[1] - - # flip - h_flipped = False - v_flipped = False - if self.do_flip: - # h flip - if np.random.rand() < self.h_flip_prob: - h_flipped = True - rgbs = [rgb[:, ::-1] for rgb in rgbs] - # v flip - if np.random.rand() < self.v_flip_prob: - v_flipped = True - rgbs = [rgb[::-1] for rgb in rgbs] - if h_flipped: - trajs[:, :, 0] = W_new - trajs[:, :, 0] - if v_flipped: - trajs[:, :, 1] = H_new - trajs[:, :, 1] - - return rgbs, trajs - - def crop(self, rgbs, trajs): - T, N, _ = trajs.shape - - S = len(rgbs) - H, W = rgbs[0].shape[:2] - assert S == T - - ############ spatial transform ############ - - H_new = H - W_new = W - - # simple random crop - y0 = 0 if self.crop_size[0] >= H_new else np.random.randint(0, H_new - self.crop_size[0]) - x0 = 0 if self.crop_size[1] >= W_new else np.random.randint(0, W_new - self.crop_size[1]) - rgbs = [rgb[y0 : y0 + self.crop_size[0], x0 : x0 + self.crop_size[1]] for rgb in rgbs] - - trajs[:, :, 0] -= x0 - trajs[:, :, 1] -= y0 - - return rgbs, trajs - - -class KubricMovifDataset(CoTrackerDataset): - def __init__( - self, - data_root, - crop_size=(384, 512), - seq_len=24, - traj_per_sample=768, - sample_vis_1st_frame=False, - use_augs=False, - ): - super(KubricMovifDataset, self).__init__( - data_root=data_root, - crop_size=crop_size, - seq_len=seq_len, - traj_per_sample=traj_per_sample, - sample_vis_1st_frame=sample_vis_1st_frame, - use_augs=use_augs, - ) - - self.pad_bounds = [0, 25] - self.resize_lim = [0.75, 1.25] # sample resizes from here - self.resize_delta = 0.05 - self.max_crop_offset = 15 - self.seq_names = [ - fname - for fname in os.listdir(data_root) - if os.path.isdir(os.path.join(data_root, fname)) - ] - print("found %d unique videos in %s" % (len(self.seq_names), self.data_root)) - - def getitem_helper(self, index): - gotit = True - seq_name = self.seq_names[index] - - npy_path = 
os.path.join(self.data_root, seq_name, seq_name + ".npy") - rgb_path = os.path.join(self.data_root, seq_name, "frames") - - img_paths = sorted(os.listdir(rgb_path)) - rgbs = [] - for i, img_path in enumerate(img_paths): - rgbs.append(imageio.v2.imread(os.path.join(rgb_path, img_path))) - - rgbs = np.stack(rgbs) - annot_dict = np.load(npy_path, allow_pickle=True).item() - traj_2d = annot_dict["coords"] - visibility = annot_dict["visibility"] - - # random crop - assert self.seq_len <= len(rgbs) - if self.seq_len < len(rgbs): - start_ind = np.random.choice(len(rgbs) - self.seq_len, 1)[0] - - rgbs = rgbs[start_ind : start_ind + self.seq_len] - traj_2d = traj_2d[:, start_ind : start_ind + self.seq_len] - visibility = visibility[:, start_ind : start_ind + self.seq_len] - - traj_2d = np.transpose(traj_2d, (1, 0, 2)) - visibility = np.transpose(np.logical_not(visibility), (1, 0)) - if self.use_augs: - rgbs, traj_2d, visibility = self.add_photometric_augs(rgbs, traj_2d, visibility) - rgbs, traj_2d = self.add_spatial_augs(rgbs, traj_2d, visibility) - else: - rgbs, traj_2d = self.crop(rgbs, traj_2d) - - visibility[traj_2d[:, :, 0] > self.crop_size[1] - 1] = False - visibility[traj_2d[:, :, 0] < 0] = False - visibility[traj_2d[:, :, 1] > self.crop_size[0] - 1] = False - visibility[traj_2d[:, :, 1] < 0] = False - - visibility = torch.from_numpy(visibility) - traj_2d = torch.from_numpy(traj_2d) - - visibile_pts_first_frame_inds = (visibility[0]).nonzero(as_tuple=False)[:, 0] - - if self.sample_vis_1st_frame: - visibile_pts_inds = visibile_pts_first_frame_inds - else: - visibile_pts_mid_frame_inds = (visibility[self.seq_len // 2]).nonzero(as_tuple=False)[ - :, 0 - ] - visibile_pts_inds = torch.cat( - (visibile_pts_first_frame_inds, visibile_pts_mid_frame_inds), dim=0 - ) - point_inds = torch.randperm(len(visibile_pts_inds))[: self.traj_per_sample] - if len(point_inds) < self.traj_per_sample: - gotit = False - - visible_inds_sampled = visibile_pts_inds[point_inds] - - trajs = traj_2d[:, visible_inds_sampled].float() - visibles = visibility[:, visible_inds_sampled] - valids = torch.ones((self.seq_len, self.traj_per_sample)) - - rgbs = torch.from_numpy(np.stack(rgbs)).permute(0, 3, 1, 2).float() - sample = CoTrackerData( - video=rgbs, - trajectory=trajs, - visibility=visibles, - valid=valids, - seq_name=seq_name, - ) - return sample, gotit - - def __len__(self): - return len(self.seq_names) +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
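For orientation, the KubricMovifDataset code removed above and re-added below reads one .npy annotation dict plus a frames/ directory per sequence. A small sketch of inspecting a single sequence, with the data root as a placeholder:

import os
import numpy as np

# Placeholder layout: <data_root>/<seq_name>/<seq_name>.npy and <data_root>/<seq_name>/frames/ with one image per frame.
data_root = "./kubric_movif"
seq_name = sorted(os.listdir(data_root))[0]

annot = np.load(os.path.join(data_root, seq_name, seq_name + ".npy"), allow_pickle=True).item()
coords = annot["coords"]          # per-point tracks; the loader transposes them to (T, N, 2)
visibility = annot["visibility"]  # per-point flags; the loader applies np.logical_not before use

print(seq_name, coords.shape, visibility.shape,
      len(os.listdir(os.path.join(data_root, seq_name, "frames"))))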
+ +import os +import torch +import cv2 + +import imageio +import numpy as np + +from cotracker.datasets.utils import CoTrackerData +from torchvision.transforms import ColorJitter, GaussianBlur +from PIL import Image + + +class CoTrackerDataset(torch.utils.data.Dataset): + def __init__( + self, + data_root, + crop_size=(384, 512), + seq_len=24, + traj_per_sample=768, + sample_vis_1st_frame=False, + use_augs=False, + ): + super(CoTrackerDataset, self).__init__() + np.random.seed(0) + torch.manual_seed(0) + self.data_root = data_root + self.seq_len = seq_len + self.traj_per_sample = traj_per_sample + self.sample_vis_1st_frame = sample_vis_1st_frame + self.use_augs = use_augs + self.crop_size = crop_size + + # photometric augmentation + self.photo_aug = ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2, hue=0.25 / 3.14) + self.blur_aug = GaussianBlur(11, sigma=(0.1, 2.0)) + + self.blur_aug_prob = 0.25 + self.color_aug_prob = 0.25 + + # occlusion augmentation + self.eraser_aug_prob = 0.5 + self.eraser_bounds = [2, 100] + self.eraser_max = 10 + + # occlusion augmentation + self.replace_aug_prob = 0.5 + self.replace_bounds = [2, 100] + self.replace_max = 10 + + # spatial augmentations + self.pad_bounds = [0, 100] + self.crop_size = crop_size + self.resize_lim = [0.25, 2.0] # sample resizes from here + self.resize_delta = 0.2 + self.max_crop_offset = 50 + + self.do_flip = True + self.h_flip_prob = 0.5 + self.v_flip_prob = 0.5 + + def getitem_helper(self, index): + return NotImplementedError + + def __getitem__(self, index): + gotit = False + + sample, gotit = self.getitem_helper(index) + if not gotit: + print("warning: sampling failed") + # fake sample, so we can still collate + sample = CoTrackerData( + video=torch.zeros((self.seq_len, 3, self.crop_size[0], self.crop_size[1])), + trajectory=torch.zeros((self.seq_len, self.traj_per_sample, 2)), + visibility=torch.zeros((self.seq_len, self.traj_per_sample)), + valid=torch.zeros((self.seq_len, self.traj_per_sample)), + ) + + return sample, gotit + + def add_photometric_augs(self, rgbs, trajs, visibles, eraser=True, replace=True): + T, N, _ = trajs.shape + + S = len(rgbs) + H, W = rgbs[0].shape[:2] + assert S == T + + if eraser: + ############ eraser transform (per image after the first) ############ + rgbs = [rgb.astype(np.float32) for rgb in rgbs] + for i in range(1, S): + if np.random.rand() < self.eraser_aug_prob: + for _ in range( + np.random.randint(1, self.eraser_max + 1) + ): # number of times to occlude + xc = np.random.randint(0, W) + yc = np.random.randint(0, H) + dx = np.random.randint(self.eraser_bounds[0], self.eraser_bounds[1]) + dy = np.random.randint(self.eraser_bounds[0], self.eraser_bounds[1]) + x0 = np.clip(xc - dx / 2, 0, W - 1).round().astype(np.int32) + x1 = np.clip(xc + dx / 2, 0, W - 1).round().astype(np.int32) + y0 = np.clip(yc - dy / 2, 0, H - 1).round().astype(np.int32) + y1 = np.clip(yc + dy / 2, 0, H - 1).round().astype(np.int32) + + mean_color = np.mean(rgbs[i][y0:y1, x0:x1, :].reshape(-1, 3), axis=0) + rgbs[i][y0:y1, x0:x1, :] = mean_color + + occ_inds = np.logical_and( + np.logical_and(trajs[i, :, 0] >= x0, trajs[i, :, 0] < x1), + np.logical_and(trajs[i, :, 1] >= y0, trajs[i, :, 1] < y1), + ) + visibles[i, occ_inds] = 0 + rgbs = [rgb.astype(np.uint8) for rgb in rgbs] + + if replace: + rgbs_alt = [ + np.array(self.photo_aug(Image.fromarray(rgb)), dtype=np.uint8) for rgb in rgbs + ] + rgbs_alt = [ + np.array(self.photo_aug(Image.fromarray(rgb)), dtype=np.uint8) for rgb in rgbs_alt + ] + + ############ replace 
transform (per image after the first) ############ + rgbs = [rgb.astype(np.float32) for rgb in rgbs] + rgbs_alt = [rgb.astype(np.float32) for rgb in rgbs_alt] + for i in range(1, S): + if np.random.rand() < self.replace_aug_prob: + for _ in range( + np.random.randint(1, self.replace_max + 1) + ): # number of times to occlude + xc = np.random.randint(0, W) + yc = np.random.randint(0, H) + dx = np.random.randint(self.replace_bounds[0], self.replace_bounds[1]) + dy = np.random.randint(self.replace_bounds[0], self.replace_bounds[1]) + x0 = np.clip(xc - dx / 2, 0, W - 1).round().astype(np.int32) + x1 = np.clip(xc + dx / 2, 0, W - 1).round().astype(np.int32) + y0 = np.clip(yc - dy / 2, 0, H - 1).round().astype(np.int32) + y1 = np.clip(yc + dy / 2, 0, H - 1).round().astype(np.int32) + + wid = x1 - x0 + hei = y1 - y0 + y00 = np.random.randint(0, H - hei) + x00 = np.random.randint(0, W - wid) + fr = np.random.randint(0, S) + rep = rgbs_alt[fr][y00 : y00 + hei, x00 : x00 + wid, :] + rgbs[i][y0:y1, x0:x1, :] = rep + + occ_inds = np.logical_and( + np.logical_and(trajs[i, :, 0] >= x0, trajs[i, :, 0] < x1), + np.logical_and(trajs[i, :, 1] >= y0, trajs[i, :, 1] < y1), + ) + visibles[i, occ_inds] = 0 + rgbs = [rgb.astype(np.uint8) for rgb in rgbs] + + ############ photometric augmentation ############ + if np.random.rand() < self.color_aug_prob: + # random per-frame amount of aug + rgbs = [np.array(self.photo_aug(Image.fromarray(rgb)), dtype=np.uint8) for rgb in rgbs] + + if np.random.rand() < self.blur_aug_prob: + # random per-frame amount of blur + rgbs = [np.array(self.blur_aug(Image.fromarray(rgb)), dtype=np.uint8) for rgb in rgbs] + + return rgbs, trajs, visibles + + def add_spatial_augs(self, rgbs, trajs, visibles): + T, N, __ = trajs.shape + + S = len(rgbs) + H, W = rgbs[0].shape[:2] + assert S == T + + rgbs = [rgb.astype(np.float32) for rgb in rgbs] + + ############ spatial transform ############ + + # padding + pad_x0 = np.random.randint(self.pad_bounds[0], self.pad_bounds[1]) + pad_x1 = np.random.randint(self.pad_bounds[0], self.pad_bounds[1]) + pad_y0 = np.random.randint(self.pad_bounds[0], self.pad_bounds[1]) + pad_y1 = np.random.randint(self.pad_bounds[0], self.pad_bounds[1]) + + rgbs = [np.pad(rgb, ((pad_y0, pad_y1), (pad_x0, pad_x1), (0, 0))) for rgb in rgbs] + trajs[:, :, 0] += pad_x0 + trajs[:, :, 1] += pad_y0 + H, W = rgbs[0].shape[:2] + + # scaling + stretching + scale = np.random.uniform(self.resize_lim[0], self.resize_lim[1]) + scale_x = scale + scale_y = scale + H_new = H + W_new = W + + scale_delta_x = 0.0 + scale_delta_y = 0.0 + + rgbs_scaled = [] + for s in range(S): + if s == 1: + scale_delta_x = np.random.uniform(-self.resize_delta, self.resize_delta) + scale_delta_y = np.random.uniform(-self.resize_delta, self.resize_delta) + elif s > 1: + scale_delta_x = ( + scale_delta_x * 0.8 + + np.random.uniform(-self.resize_delta, self.resize_delta) * 0.2 + ) + scale_delta_y = ( + scale_delta_y * 0.8 + + np.random.uniform(-self.resize_delta, self.resize_delta) * 0.2 + ) + scale_x = scale_x + scale_delta_x + scale_y = scale_y + scale_delta_y + + # bring h/w closer + scale_xy = (scale_x + scale_y) * 0.5 + scale_x = scale_x * 0.5 + scale_xy * 0.5 + scale_y = scale_y * 0.5 + scale_xy * 0.5 + + # don't get too crazy + scale_x = np.clip(scale_x, 0.2, 2.0) + scale_y = np.clip(scale_y, 0.2, 2.0) + + H_new = int(H * scale_y) + W_new = int(W * scale_x) + + # make it at least slightly bigger than the crop area, + # so that the random cropping can add diversity + H_new = np.clip(H_new, 
self.crop_size[0] + 10, None) + W_new = np.clip(W_new, self.crop_size[1] + 10, None) + # recompute scale in case we clipped + scale_x = (W_new - 1) / float(W - 1) + scale_y = (H_new - 1) / float(H - 1) + rgbs_scaled.append(cv2.resize(rgbs[s], (W_new, H_new), interpolation=cv2.INTER_LINEAR)) + trajs[s, :, 0] *= scale_x + trajs[s, :, 1] *= scale_y + rgbs = rgbs_scaled + + ok_inds = visibles[0, :] > 0 + vis_trajs = trajs[:, ok_inds] # S,?,2 + + if vis_trajs.shape[1] > 0: + mid_x = np.mean(vis_trajs[0, :, 0]) + mid_y = np.mean(vis_trajs[0, :, 1]) + else: + mid_y = self.crop_size[0] + mid_x = self.crop_size[1] + + x0 = int(mid_x - self.crop_size[1] // 2) + y0 = int(mid_y - self.crop_size[0] // 2) + + offset_x = 0 + offset_y = 0 + + for s in range(S): + # on each frame, shift a bit more + if s == 1: + offset_x = np.random.randint(-self.max_crop_offset, self.max_crop_offset) + offset_y = np.random.randint(-self.max_crop_offset, self.max_crop_offset) + elif s > 1: + offset_x = int( + offset_x * 0.8 + + np.random.randint(-self.max_crop_offset, self.max_crop_offset + 1) * 0.2 + ) + offset_y = int( + offset_y * 0.8 + + np.random.randint(-self.max_crop_offset, self.max_crop_offset + 1) * 0.2 + ) + x0 = x0 + offset_x + y0 = y0 + offset_y + + H_new, W_new = rgbs[s].shape[:2] + if H_new == self.crop_size[0]: + y0 = 0 + else: + y0 = min(max(0, y0), H_new - self.crop_size[0] - 1) + + if W_new == self.crop_size[1]: + x0 = 0 + else: + x0 = min(max(0, x0), W_new - self.crop_size[1] - 1) + + rgbs[s] = rgbs[s][y0 : y0 + self.crop_size[0], x0 : x0 + self.crop_size[1]] + trajs[s, :, 0] -= x0 + trajs[s, :, 1] -= y0 + + H_new = self.crop_size[0] + W_new = self.crop_size[1] + + # flip + h_flipped = False + v_flipped = False + if self.do_flip: + # h flip + if np.random.rand() < self.h_flip_prob: + h_flipped = True + rgbs = [rgb[:, ::-1] for rgb in rgbs] + # v flip + if np.random.rand() < self.v_flip_prob: + v_flipped = True + rgbs = [rgb[::-1] for rgb in rgbs] + if h_flipped: + trajs[:, :, 0] = W_new - trajs[:, :, 0] + if v_flipped: + trajs[:, :, 1] = H_new - trajs[:, :, 1] + + return rgbs, trajs + + def crop(self, rgbs, trajs): + T, N, _ = trajs.shape + + S = len(rgbs) + H, W = rgbs[0].shape[:2] + assert S == T + + ############ spatial transform ############ + + H_new = H + W_new = W + + # simple random crop + y0 = 0 if self.crop_size[0] >= H_new else np.random.randint(0, H_new - self.crop_size[0]) + x0 = 0 if self.crop_size[1] >= W_new else np.random.randint(0, W_new - self.crop_size[1]) + rgbs = [rgb[y0 : y0 + self.crop_size[0], x0 : x0 + self.crop_size[1]] for rgb in rgbs] + + trajs[:, :, 0] -= x0 + trajs[:, :, 1] -= y0 + + return rgbs, trajs + + +class KubricMovifDataset(CoTrackerDataset): + def __init__( + self, + data_root, + crop_size=(384, 512), + seq_len=24, + traj_per_sample=768, + sample_vis_1st_frame=False, + use_augs=False, + ): + super(KubricMovifDataset, self).__init__( + data_root=data_root, + crop_size=crop_size, + seq_len=seq_len, + traj_per_sample=traj_per_sample, + sample_vis_1st_frame=sample_vis_1st_frame, + use_augs=use_augs, + ) + + self.pad_bounds = [0, 25] + self.resize_lim = [0.75, 1.25] # sample resizes from here + self.resize_delta = 0.05 + self.max_crop_offset = 15 + self.seq_names = [ + fname + for fname in os.listdir(data_root) + if os.path.isdir(os.path.join(data_root, fname)) + ] + print("found %d unique videos in %s" % (len(self.seq_names), self.data_root)) + + def getitem_helper(self, index): + gotit = True + seq_name = self.seq_names[index] + + npy_path = 
os.path.join(self.data_root, seq_name, seq_name + ".npy") + rgb_path = os.path.join(self.data_root, seq_name, "frames") + + img_paths = sorted(os.listdir(rgb_path)) + rgbs = [] + for i, img_path in enumerate(img_paths): + rgbs.append(imageio.v2.imread(os.path.join(rgb_path, img_path))) + + rgbs = np.stack(rgbs) + annot_dict = np.load(npy_path, allow_pickle=True).item() + traj_2d = annot_dict["coords"] + visibility = annot_dict["visibility"] + + # random crop + assert self.seq_len <= len(rgbs) + if self.seq_len < len(rgbs): + start_ind = np.random.choice(len(rgbs) - self.seq_len, 1)[0] + + rgbs = rgbs[start_ind : start_ind + self.seq_len] + traj_2d = traj_2d[:, start_ind : start_ind + self.seq_len] + visibility = visibility[:, start_ind : start_ind + self.seq_len] + + traj_2d = np.transpose(traj_2d, (1, 0, 2)) + visibility = np.transpose(np.logical_not(visibility), (1, 0)) + if self.use_augs: + rgbs, traj_2d, visibility = self.add_photometric_augs(rgbs, traj_2d, visibility) + rgbs, traj_2d = self.add_spatial_augs(rgbs, traj_2d, visibility) + else: + rgbs, traj_2d = self.crop(rgbs, traj_2d) + + visibility[traj_2d[:, :, 0] > self.crop_size[1] - 1] = False + visibility[traj_2d[:, :, 0] < 0] = False + visibility[traj_2d[:, :, 1] > self.crop_size[0] - 1] = False + visibility[traj_2d[:, :, 1] < 0] = False + + visibility = torch.from_numpy(visibility) + traj_2d = torch.from_numpy(traj_2d) + + visibile_pts_first_frame_inds = (visibility[0]).nonzero(as_tuple=False)[:, 0] + + if self.sample_vis_1st_frame: + visibile_pts_inds = visibile_pts_first_frame_inds + else: + visibile_pts_mid_frame_inds = (visibility[self.seq_len // 2]).nonzero(as_tuple=False)[ + :, 0 + ] + visibile_pts_inds = torch.cat( + (visibile_pts_first_frame_inds, visibile_pts_mid_frame_inds), dim=0 + ) + point_inds = torch.randperm(len(visibile_pts_inds))[: self.traj_per_sample] + if len(point_inds) < self.traj_per_sample: + gotit = False + + visible_inds_sampled = visibile_pts_inds[point_inds] + + trajs = traj_2d[:, visible_inds_sampled].float() + visibles = visibility[:, visible_inds_sampled] + valids = torch.ones((self.seq_len, self.traj_per_sample)) + + rgbs = torch.from_numpy(np.stack(rgbs)).permute(0, 3, 1, 2).float() + sample = CoTrackerData( + video=rgbs, + trajectory=trajs, + visibility=visibles, + valid=valids, + seq_name=seq_name, + ) + return sample, gotit + + def __len__(self): + return len(self.seq_names) diff --git a/cotracker/datasets/tap_vid_datasets.py b/cotracker/datasets/tap_vid_datasets.py index 72e0001..5597b83 100644 --- a/cotracker/datasets/tap_vid_datasets.py +++ b/cotracker/datasets/tap_vid_datasets.py @@ -1,209 +1,209 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import os -import io -import glob -import torch -import pickle -import numpy as np -import mediapy as media - -from PIL import Image -from typing import Mapping, Tuple, Union - -from cotracker.datasets.utils import CoTrackerData - -DatasetElement = Mapping[str, Mapping[str, Union[np.ndarray, str]]] - - -def resize_video(video: np.ndarray, output_size: Tuple[int, int]) -> np.ndarray: - """Resize a video to output_size.""" - # If you have a GPU, consider replacing this with a GPU-enabled resize op, - # such as a jitted jax.image.resize. It will make things faster. 
- return media.resize_video(video, output_size) - - -def sample_queries_first( - target_occluded: np.ndarray, - target_points: np.ndarray, - frames: np.ndarray, -) -> Mapping[str, np.ndarray]: - """Package a set of frames and tracks for use in TAPNet evaluations. - Given a set of frames and tracks with no query points, use the first - visible point in each track as the query. - Args: - target_occluded: Boolean occlusion flag, of shape [n_tracks, n_frames], - where True indicates occluded. - target_points: Position, of shape [n_tracks, n_frames, 2], where each point - is [x,y] scaled between 0 and 1. - frames: Video tensor, of shape [n_frames, height, width, 3]. Scaled between - -1 and 1. - Returns: - A dict with the keys: - video: Video tensor of shape [1, n_frames, height, width, 3] - query_points: Query points of shape [1, n_queries, 3] where - each point is [t, y, x] scaled to the range [-1, 1] - target_points: Target points of shape [1, n_queries, n_frames, 2] where - each point is [x, y] scaled to the range [-1, 1] - """ - valid = np.sum(~target_occluded, axis=1) > 0 - target_points = target_points[valid, :] - target_occluded = target_occluded[valid, :] - - query_points = [] - for i in range(target_points.shape[0]): - index = np.where(target_occluded[i] == 0)[0][0] - x, y = target_points[i, index, 0], target_points[i, index, 1] - query_points.append(np.array([index, y, x])) # [t, y, x] - query_points = np.stack(query_points, axis=0) - - return { - "video": frames[np.newaxis, ...], - "query_points": query_points[np.newaxis, ...], - "target_points": target_points[np.newaxis, ...], - "occluded": target_occluded[np.newaxis, ...], - } - - -def sample_queries_strided( - target_occluded: np.ndarray, - target_points: np.ndarray, - frames: np.ndarray, - query_stride: int = 5, -) -> Mapping[str, np.ndarray]: - """Package a set of frames and tracks for use in TAPNet evaluations. - - Given a set of frames and tracks with no query points, sample queries - strided every query_stride frames, ignoring points that are not visible - at the selected frames. - - Args: - target_occluded: Boolean occlusion flag, of shape [n_tracks, n_frames], - where True indicates occluded. - target_points: Position, of shape [n_tracks, n_frames, 2], where each point - is [x,y] scaled between 0 and 1. - frames: Video tensor, of shape [n_frames, height, width, 3]. Scaled between - -1 and 1. - query_stride: When sampling query points, search for un-occluded points - every query_stride frames and convert each one into a query. - - Returns: - A dict with the keys: - video: Video tensor of shape [1, n_frames, height, width, 3]. The video - has floats scaled to the range [-1, 1]. - query_points: Query points of shape [1, n_queries, 3] where - each point is [t, y, x] scaled to the range [-1, 1]. - target_points: Target points of shape [1, n_queries, n_frames, 2] where - each point is [x, y] scaled to the range [-1, 1]. - trackgroup: Index of the original track that each query point was - sampled from. This is useful for visualization. 
- """ - tracks = [] - occs = [] - queries = [] - trackgroups = [] - total = 0 - trackgroup = np.arange(target_occluded.shape[0]) - for i in range(0, target_occluded.shape[1], query_stride): - mask = target_occluded[:, i] == 0 - query = np.stack( - [ - i * np.ones(target_occluded.shape[0:1]), - target_points[:, i, 1], - target_points[:, i, 0], - ], - axis=-1, - ) - queries.append(query[mask]) - tracks.append(target_points[mask]) - occs.append(target_occluded[mask]) - trackgroups.append(trackgroup[mask]) - total += np.array(np.sum(target_occluded[:, i] == 0)) - - return { - "video": frames[np.newaxis, ...], - "query_points": np.concatenate(queries, axis=0)[np.newaxis, ...], - "target_points": np.concatenate(tracks, axis=0)[np.newaxis, ...], - "occluded": np.concatenate(occs, axis=0)[np.newaxis, ...], - "trackgroup": np.concatenate(trackgroups, axis=0)[np.newaxis, ...], - } - - -class TapVidDataset(torch.utils.data.Dataset): - def __init__( - self, - data_root, - dataset_type="davis", - resize_to_256=True, - queried_first=True, - ): - self.dataset_type = dataset_type - self.resize_to_256 = resize_to_256 - self.queried_first = queried_first - if self.dataset_type == "kinetics": - all_paths = glob.glob(os.path.join(data_root, "*_of_0010.pkl")) - points_dataset = [] - for pickle_path in all_paths: - with open(pickle_path, "rb") as f: - data = pickle.load(f) - points_dataset = points_dataset + data - self.points_dataset = points_dataset - else: - with open(data_root, "rb") as f: - self.points_dataset = pickle.load(f) - if self.dataset_type == "davis": - self.video_names = list(self.points_dataset.keys()) - print("found %d unique videos in %s" % (len(self.points_dataset), data_root)) - - def __getitem__(self, index): - if self.dataset_type == "davis": - video_name = self.video_names[index] - else: - video_name = index - video = self.points_dataset[video_name] - frames = video["video"] - - if isinstance(frames[0], bytes): - # TAP-Vid is stored and JPEG bytes rather than `np.ndarray`s. - def decode(frame): - byteio = io.BytesIO(frame) - img = Image.open(byteio) - return np.array(img) - - frames = np.array([decode(frame) for frame in frames]) - - target_points = self.points_dataset[video_name]["points"] - if self.resize_to_256: - frames = resize_video(frames, [256, 256]) - target_points *= np.array([255, 255]) # 1 should be mapped to 256-1 - else: - target_points *= np.array([frames.shape[2] - 1, frames.shape[1] - 1]) - - target_occ = self.points_dataset[video_name]["occluded"] - if self.queried_first: - converted = sample_queries_first(target_occ, target_points, frames) - else: - converted = sample_queries_strided(target_occ, target_points, frames) - assert converted["target_points"].shape[1] == converted["query_points"].shape[1] - - trajs = torch.from_numpy(converted["target_points"])[0].permute(1, 0, 2).float() # T, N, D - - rgbs = torch.from_numpy(frames).permute(0, 3, 1, 2).float() - visibles = torch.logical_not(torch.from_numpy(converted["occluded"]))[0].permute( - 1, 0 - ) # T, N - query_points = torch.from_numpy(converted["query_points"])[0] # T, N - return CoTrackerData( - rgbs, - trajs, - visibles, - seq_name=str(video_name), - query_points=query_points, - ) - - def __len__(self): - return len(self.points_dataset) +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
+ +import os +import io +import glob +import torch +import pickle +import numpy as np +import mediapy as media + +from PIL import Image +from typing import Mapping, Tuple, Union + +from cotracker.datasets.utils import CoTrackerData + +DatasetElement = Mapping[str, Mapping[str, Union[np.ndarray, str]]] + + +def resize_video(video: np.ndarray, output_size: Tuple[int, int]) -> np.ndarray: + """Resize a video to output_size.""" + # If you have a GPU, consider replacing this with a GPU-enabled resize op, + # such as a jitted jax.image.resize. It will make things faster. + return media.resize_video(video, output_size) + + +def sample_queries_first( + target_occluded: np.ndarray, + target_points: np.ndarray, + frames: np.ndarray, +) -> Mapping[str, np.ndarray]: + """Package a set of frames and tracks for use in TAPNet evaluations. + Given a set of frames and tracks with no query points, use the first + visible point in each track as the query. + Args: + target_occluded: Boolean occlusion flag, of shape [n_tracks, n_frames], + where True indicates occluded. + target_points: Position, of shape [n_tracks, n_frames, 2], where each point + is [x,y] scaled between 0 and 1. + frames: Video tensor, of shape [n_frames, height, width, 3]. Scaled between + -1 and 1. + Returns: + A dict with the keys: + video: Video tensor of shape [1, n_frames, height, width, 3] + query_points: Query points of shape [1, n_queries, 3] where + each point is [t, y, x] scaled to the range [-1, 1] + target_points: Target points of shape [1, n_queries, n_frames, 2] where + each point is [x, y] scaled to the range [-1, 1] + """ + valid = np.sum(~target_occluded, axis=1) > 0 + target_points = target_points[valid, :] + target_occluded = target_occluded[valid, :] + + query_points = [] + for i in range(target_points.shape[0]): + index = np.where(target_occluded[i] == 0)[0][0] + x, y = target_points[i, index, 0], target_points[i, index, 1] + query_points.append(np.array([index, y, x])) # [t, y, x] + query_points = np.stack(query_points, axis=0) + + return { + "video": frames[np.newaxis, ...], + "query_points": query_points[np.newaxis, ...], + "target_points": target_points[np.newaxis, ...], + "occluded": target_occluded[np.newaxis, ...], + } + + +def sample_queries_strided( + target_occluded: np.ndarray, + target_points: np.ndarray, + frames: np.ndarray, + query_stride: int = 5, +) -> Mapping[str, np.ndarray]: + """Package a set of frames and tracks for use in TAPNet evaluations. + + Given a set of frames and tracks with no query points, sample queries + strided every query_stride frames, ignoring points that are not visible + at the selected frames. + + Args: + target_occluded: Boolean occlusion flag, of shape [n_tracks, n_frames], + where True indicates occluded. + target_points: Position, of shape [n_tracks, n_frames, 2], where each point + is [x,y] scaled between 0 and 1. + frames: Video tensor, of shape [n_frames, height, width, 3]. Scaled between + -1 and 1. + query_stride: When sampling query points, search for un-occluded points + every query_stride frames and convert each one into a query. + + Returns: + A dict with the keys: + video: Video tensor of shape [1, n_frames, height, width, 3]. The video + has floats scaled to the range [-1, 1]. + query_points: Query points of shape [1, n_queries, 3] where + each point is [t, y, x] scaled to the range [-1, 1]. + target_points: Target points of shape [1, n_queries, n_frames, 2] where + each point is [x, y] scaled to the range [-1, 1]. 
+ trackgroup: Index of the original track that each query point was + sampled from. This is useful for visualization. + """ + tracks = [] + occs = [] + queries = [] + trackgroups = [] + total = 0 + trackgroup = np.arange(target_occluded.shape[0]) + for i in range(0, target_occluded.shape[1], query_stride): + mask = target_occluded[:, i] == 0 + query = np.stack( + [ + i * np.ones(target_occluded.shape[0:1]), + target_points[:, i, 1], + target_points[:, i, 0], + ], + axis=-1, + ) + queries.append(query[mask]) + tracks.append(target_points[mask]) + occs.append(target_occluded[mask]) + trackgroups.append(trackgroup[mask]) + total += np.array(np.sum(target_occluded[:, i] == 0)) + + return { + "video": frames[np.newaxis, ...], + "query_points": np.concatenate(queries, axis=0)[np.newaxis, ...], + "target_points": np.concatenate(tracks, axis=0)[np.newaxis, ...], + "occluded": np.concatenate(occs, axis=0)[np.newaxis, ...], + "trackgroup": np.concatenate(trackgroups, axis=0)[np.newaxis, ...], + } + + +class TapVidDataset(torch.utils.data.Dataset): + def __init__( + self, + data_root, + dataset_type="davis", + resize_to_256=True, + queried_first=True, + ): + self.dataset_type = dataset_type + self.resize_to_256 = resize_to_256 + self.queried_first = queried_first + if self.dataset_type == "kinetics": + all_paths = glob.glob(os.path.join(data_root, "*_of_0010.pkl")) + points_dataset = [] + for pickle_path in all_paths: + with open(pickle_path, "rb") as f: + data = pickle.load(f) + points_dataset = points_dataset + data + self.points_dataset = points_dataset + else: + with open(data_root, "rb") as f: + self.points_dataset = pickle.load(f) + if self.dataset_type == "davis": + self.video_names = list(self.points_dataset.keys()) + print("found %d unique videos in %s" % (len(self.points_dataset), data_root)) + + def __getitem__(self, index): + if self.dataset_type == "davis": + video_name = self.video_names[index] + else: + video_name = index + video = self.points_dataset[video_name] + frames = video["video"] + + if isinstance(frames[0], bytes): + # TAP-Vid is stored and JPEG bytes rather than `np.ndarray`s. 
+ def decode(frame): + byteio = io.BytesIO(frame) + img = Image.open(byteio) + return np.array(img) + + frames = np.array([decode(frame) for frame in frames]) + + target_points = self.points_dataset[video_name]["points"] + if self.resize_to_256: + frames = resize_video(frames, [256, 256]) + target_points *= np.array([255, 255]) # 1 should be mapped to 256-1 + else: + target_points *= np.array([frames.shape[2] - 1, frames.shape[1] - 1]) + + target_occ = self.points_dataset[video_name]["occluded"] + if self.queried_first: + converted = sample_queries_first(target_occ, target_points, frames) + else: + converted = sample_queries_strided(target_occ, target_points, frames) + assert converted["target_points"].shape[1] == converted["query_points"].shape[1] + + trajs = torch.from_numpy(converted["target_points"])[0].permute(1, 0, 2).float() # T, N, D + + rgbs = torch.from_numpy(frames).permute(0, 3, 1, 2).float() + visibles = torch.logical_not(torch.from_numpy(converted["occluded"]))[0].permute( + 1, 0 + ) # T, N + query_points = torch.from_numpy(converted["query_points"])[0] # T, N + return CoTrackerData( + rgbs, + trajs, + visibles, + seq_name=str(video_name), + query_points=query_points, + ) + + def __len__(self): + return len(self.points_dataset) diff --git a/cotracker/datasets/utils.py b/cotracker/datasets/utils.py index 30149f1..09b5ede 100644 --- a/cotracker/datasets/utils.py +++ b/cotracker/datasets/utils.py @@ -1,106 +1,106 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - - -import torch -import dataclasses -import torch.nn.functional as F -from dataclasses import dataclass -from typing import Any, Optional - - -@dataclass(eq=False) -class CoTrackerData: - """ - Dataclass for storing video tracks data. - """ - - video: torch.Tensor # B, S, C, H, W - trajectory: torch.Tensor # B, S, N, 2 - visibility: torch.Tensor # B, S, N - # optional data - valid: Optional[torch.Tensor] = None # B, S, N - segmentation: Optional[torch.Tensor] = None # B, S, 1, H, W - seq_name: Optional[str] = None - query_points: Optional[torch.Tensor] = None # TapVID evaluation format - - -def collate_fn(batch): - """ - Collate function for video tracks data. - """ - video = torch.stack([b.video for b in batch], dim=0) - trajectory = torch.stack([b.trajectory for b in batch], dim=0) - visibility = torch.stack([b.visibility for b in batch], dim=0) - query_points = segmentation = None - if batch[0].query_points is not None: - query_points = torch.stack([b.query_points for b in batch], dim=0) - if batch[0].segmentation is not None: - segmentation = torch.stack([b.segmentation for b in batch], dim=0) - seq_name = [b.seq_name for b in batch] - - return CoTrackerData( - video=video, - trajectory=trajectory, - visibility=visibility, - segmentation=segmentation, - seq_name=seq_name, - query_points=query_points, - ) - - -def collate_fn_train(batch): - """ - Collate function for video tracks data during training. 
- """ - gotit = [gotit for _, gotit in batch] - video = torch.stack([b.video for b, _ in batch], dim=0) - trajectory = torch.stack([b.trajectory for b, _ in batch], dim=0) - visibility = torch.stack([b.visibility for b, _ in batch], dim=0) - valid = torch.stack([b.valid for b, _ in batch], dim=0) - seq_name = [b.seq_name for b, _ in batch] - return ( - CoTrackerData( - video=video, - trajectory=trajectory, - visibility=visibility, - valid=valid, - seq_name=seq_name, - ), - gotit, - ) - - -def try_to_cuda(t: Any) -> Any: - """ - Try to move the input variable `t` to a cuda device. - - Args: - t: Input. - - Returns: - t_cuda: `t` moved to a cuda device, if supported. - """ - try: - t = t.float().cuda() - except AttributeError: - pass - return t - - -def dataclass_to_cuda_(obj): - """ - Move all contents of a dataclass to cuda inplace if supported. - - Args: - batch: Input dataclass. - - Returns: - batch_cuda: `batch` moved to a cuda device, if supported. - """ - for f in dataclasses.fields(obj): - setattr(obj, f.name, try_to_cuda(getattr(obj, f.name))) - return obj +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + + +import torch +import dataclasses +import torch.nn.functional as F +from dataclasses import dataclass +from typing import Any, Optional + + +@dataclass(eq=False) +class CoTrackerData: + """ + Dataclass for storing video tracks data. + """ + + video: torch.Tensor # B, S, C, H, W + trajectory: torch.Tensor # B, S, N, 2 + visibility: torch.Tensor # B, S, N + # optional data + valid: Optional[torch.Tensor] = None # B, S, N + segmentation: Optional[torch.Tensor] = None # B, S, 1, H, W + seq_name: Optional[str] = None + query_points: Optional[torch.Tensor] = None # TapVID evaluation format + + +def collate_fn(batch): + """ + Collate function for video tracks data. + """ + video = torch.stack([b.video for b in batch], dim=0) + trajectory = torch.stack([b.trajectory for b in batch], dim=0) + visibility = torch.stack([b.visibility for b in batch], dim=0) + query_points = segmentation = None + if batch[0].query_points is not None: + query_points = torch.stack([b.query_points for b in batch], dim=0) + if batch[0].segmentation is not None: + segmentation = torch.stack([b.segmentation for b in batch], dim=0) + seq_name = [b.seq_name for b in batch] + + return CoTrackerData( + video=video, + trajectory=trajectory, + visibility=visibility, + segmentation=segmentation, + seq_name=seq_name, + query_points=query_points, + ) + + +def collate_fn_train(batch): + """ + Collate function for video tracks data during training. + """ + gotit = [gotit for _, gotit in batch] + video = torch.stack([b.video for b, _ in batch], dim=0) + trajectory = torch.stack([b.trajectory for b, _ in batch], dim=0) + visibility = torch.stack([b.visibility for b, _ in batch], dim=0) + valid = torch.stack([b.valid for b, _ in batch], dim=0) + seq_name = [b.seq_name for b, _ in batch] + return ( + CoTrackerData( + video=video, + trajectory=trajectory, + visibility=visibility, + valid=valid, + seq_name=seq_name, + ), + gotit, + ) + + +def try_to_cuda(t: Any) -> Any: + """ + Try to move the input variable `t` to a cuda device. + + Args: + t: Input. + + Returns: + t_cuda: `t` moved to a cuda device, if supported. 
+ """ + try: + t = t.float().cuda() + except AttributeError: + pass + return t + + +def dataclass_to_cuda_(obj): + """ + Move all contents of a dataclass to cuda inplace if supported. + + Args: + batch: Input dataclass. + + Returns: + batch_cuda: `batch` moved to a cuda device, if supported. + """ + for f in dataclasses.fields(obj): + setattr(obj, f.name, try_to_cuda(getattr(obj, f.name))) + return obj diff --git a/cotracker/evaluation/__init__.py b/cotracker/evaluation/__init__.py index 5277f46..4547e07 100644 --- a/cotracker/evaluation/__init__.py +++ b/cotracker/evaluation/__init__.py @@ -1,5 +1,5 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. diff --git a/cotracker/evaluation/configs/eval_dynamic_replica.yaml b/cotracker/evaluation/configs/eval_dynamic_replica.yaml index 7d6fca9..2f9c325 100644 --- a/cotracker/evaluation/configs/eval_dynamic_replica.yaml +++ b/cotracker/evaluation/configs/eval_dynamic_replica.yaml @@ -1,6 +1,6 @@ -defaults: - - default_config_eval -exp_dir: ./outputs/cotracker -dataset_name: dynamic_replica - +defaults: + - default_config_eval +exp_dir: ./outputs/cotracker +dataset_name: dynamic_replica + \ No newline at end of file diff --git a/cotracker/evaluation/configs/eval_tapvid_davis_first.yaml b/cotracker/evaluation/configs/eval_tapvid_davis_first.yaml index d37a6c9..0d72e37 100644 --- a/cotracker/evaluation/configs/eval_tapvid_davis_first.yaml +++ b/cotracker/evaluation/configs/eval_tapvid_davis_first.yaml @@ -1,6 +1,6 @@ -defaults: - - default_config_eval -exp_dir: ./outputs/cotracker -dataset_name: tapvid_davis_first - +defaults: + - default_config_eval +exp_dir: ./outputs/cotracker +dataset_name: tapvid_davis_first + \ No newline at end of file diff --git a/cotracker/evaluation/configs/eval_tapvid_davis_strided.yaml b/cotracker/evaluation/configs/eval_tapvid_davis_strided.yaml index 6e3cf3c..5a687bc 100644 --- a/cotracker/evaluation/configs/eval_tapvid_davis_strided.yaml +++ b/cotracker/evaluation/configs/eval_tapvid_davis_strided.yaml @@ -1,6 +1,6 @@ -defaults: - - default_config_eval -exp_dir: ./outputs/cotracker -dataset_name: tapvid_davis_strided - +defaults: + - default_config_eval +exp_dir: ./outputs/cotracker +dataset_name: tapvid_davis_strided + \ No newline at end of file diff --git a/cotracker/evaluation/configs/eval_tapvid_kinetics_first.yaml b/cotracker/evaluation/configs/eval_tapvid_kinetics_first.yaml index 3be8914..f8651f6 100644 --- a/cotracker/evaluation/configs/eval_tapvid_kinetics_first.yaml +++ b/cotracker/evaluation/configs/eval_tapvid_kinetics_first.yaml @@ -1,6 +1,6 @@ -defaults: - - default_config_eval -exp_dir: ./outputs/cotracker -dataset_name: tapvid_kinetics_first - +defaults: + - default_config_eval +exp_dir: ./outputs/cotracker +dataset_name: tapvid_kinetics_first + \ No newline at end of file diff --git a/cotracker/evaluation/core/__init__.py b/cotracker/evaluation/core/__init__.py index 5277f46..4547e07 100644 --- a/cotracker/evaluation/core/__init__.py +++ b/cotracker/evaluation/core/__init__.py @@ -1,5 +1,5 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. 
- -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. diff --git a/cotracker/evaluation/core/eval_utils.py b/cotracker/evaluation/core/eval_utils.py index 7002fa5..dca0380 100644 --- a/cotracker/evaluation/core/eval_utils.py +++ b/cotracker/evaluation/core/eval_utils.py @@ -1,138 +1,138 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import numpy as np - -from typing import Iterable, Mapping, Tuple, Union - - -def compute_tapvid_metrics( - query_points: np.ndarray, - gt_occluded: np.ndarray, - gt_tracks: np.ndarray, - pred_occluded: np.ndarray, - pred_tracks: np.ndarray, - query_mode: str, -) -> Mapping[str, np.ndarray]: - """Computes TAP-Vid metrics (Jaccard, Pts. Within Thresh, Occ. Acc.) - See the TAP-Vid paper for details on the metric computation. All inputs are - given in raster coordinates. The first three arguments should be the direct - outputs of the reader: the 'query_points', 'occluded', and 'target_points'. - The paper metrics assume these are scaled relative to 256x256 images. - pred_occluded and pred_tracks are your algorithm's predictions. - This function takes a batch of inputs, and computes metrics separately for - each video. The metrics for the full benchmark are a simple mean of the - metrics across the full set of videos. These numbers are between 0 and 1, - but the paper multiplies them by 100 to ease reading. - Args: - query_points: The query points, an in the format [t, y, x]. Its size is - [b, n, 3], where b is the batch size and n is the number of queries - gt_occluded: A boolean array of shape [b, n, t], where t is the number - of frames. True indicates that the point is occluded. - gt_tracks: The target points, of shape [b, n, t, 2]. Each point is - in the format [x, y] - pred_occluded: A boolean array of predicted occlusions, in the same - format as gt_occluded. - pred_tracks: An array of track predictions from your algorithm, in the - same format as gt_tracks. - query_mode: Either 'first' or 'strided', depending on how queries are - sampled. If 'first', we assume the prior knowledge that all points - before the query point are occluded, and these are removed from the - evaluation. - Returns: - A dict with the following keys: - occlusion_accuracy: Accuracy at predicting occlusion. - pts_within_{x} for x in [1, 2, 4, 8, 16]: Fraction of points - predicted to be within the given pixel threshold, ignoring occlusion - prediction. 
- jaccard_{x} for x in [1, 2, 4, 8, 16]: Jaccard metric for the given - threshold - average_pts_within_thresh: average across pts_within_{x} - average_jaccard: average across jaccard_{x} - """ - - metrics = {} - # Fixed bug is described in: - # https://github.com/facebookresearch/co-tracker/issues/20 - eye = np.eye(gt_tracks.shape[2], dtype=np.int32) - - if query_mode == "first": - # evaluate frames after the query frame - query_frame_to_eval_frames = np.cumsum(eye, axis=1) - eye - elif query_mode == "strided": - # evaluate all frames except the query frame - query_frame_to_eval_frames = 1 - eye - else: - raise ValueError("Unknown query mode " + query_mode) - - query_frame = query_points[..., 0] - query_frame = np.round(query_frame).astype(np.int32) - evaluation_points = query_frame_to_eval_frames[query_frame] > 0 - - # Occlusion accuracy is simply how often the predicted occlusion equals the - # ground truth. - occ_acc = np.sum( - np.equal(pred_occluded, gt_occluded) & evaluation_points, - axis=(1, 2), - ) / np.sum(evaluation_points) - metrics["occlusion_accuracy"] = occ_acc - - # Next, convert the predictions and ground truth positions into pixel - # coordinates. - visible = np.logical_not(gt_occluded) - pred_visible = np.logical_not(pred_occluded) - all_frac_within = [] - all_jaccard = [] - for thresh in [1, 2, 4, 8, 16]: - # True positives are points that are within the threshold and where both - # the prediction and the ground truth are listed as visible. - within_dist = np.sum( - np.square(pred_tracks - gt_tracks), - axis=-1, - ) < np.square(thresh) - is_correct = np.logical_and(within_dist, visible) - - # Compute the frac_within_threshold, which is the fraction of points - # within the threshold among points that are visible in the ground truth, - # ignoring whether they're predicted to be visible. - count_correct = np.sum( - is_correct & evaluation_points, - axis=(1, 2), - ) - count_visible_points = np.sum(visible & evaluation_points, axis=(1, 2)) - frac_correct = count_correct / count_visible_points - metrics["pts_within_" + str(thresh)] = frac_correct - all_frac_within.append(frac_correct) - - true_positives = np.sum( - is_correct & pred_visible & evaluation_points, axis=(1, 2) - ) - - # The denominator of the jaccard metric is the true positives plus - # false positives plus false negatives. However, note that true positives - # plus false negatives is simply the number of points in the ground truth - # which is easier to compute than trying to compute all three quantities. - # Thus we just add the number of points in the ground truth to the number - # of false positives. - # - # False positives are simply points that are predicted to be visible, - # but the ground truth is not visible or too far from the prediction. - gt_positives = np.sum(visible & evaluation_points, axis=(1, 2)) - false_positives = (~visible) & pred_visible - false_positives = false_positives | ((~within_dist) & pred_visible) - false_positives = np.sum(false_positives & evaluation_points, axis=(1, 2)) - jaccard = true_positives / (gt_positives + false_positives) - metrics["jaccard_" + str(thresh)] = jaccard - all_jaccard.append(jaccard) - metrics["average_jaccard"] = np.mean( - np.stack(all_jaccard, axis=1), - axis=1, - ) - metrics["average_pts_within_thresh"] = np.mean( - np.stack(all_frac_within, axis=1), - axis=1, - ) - return metrics +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. 
+ +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import numpy as np + +from typing import Iterable, Mapping, Tuple, Union + + +def compute_tapvid_metrics( + query_points: np.ndarray, + gt_occluded: np.ndarray, + gt_tracks: np.ndarray, + pred_occluded: np.ndarray, + pred_tracks: np.ndarray, + query_mode: str, +) -> Mapping[str, np.ndarray]: + """Computes TAP-Vid metrics (Jaccard, Pts. Within Thresh, Occ. Acc.) + See the TAP-Vid paper for details on the metric computation. All inputs are + given in raster coordinates. The first three arguments should be the direct + outputs of the reader: the 'query_points', 'occluded', and 'target_points'. + The paper metrics assume these are scaled relative to 256x256 images. + pred_occluded and pred_tracks are your algorithm's predictions. + This function takes a batch of inputs, and computes metrics separately for + each video. The metrics for the full benchmark are a simple mean of the + metrics across the full set of videos. These numbers are between 0 and 1, + but the paper multiplies them by 100 to ease reading. + Args: + query_points: The query points, an in the format [t, y, x]. Its size is + [b, n, 3], where b is the batch size and n is the number of queries + gt_occluded: A boolean array of shape [b, n, t], where t is the number + of frames. True indicates that the point is occluded. + gt_tracks: The target points, of shape [b, n, t, 2]. Each point is + in the format [x, y] + pred_occluded: A boolean array of predicted occlusions, in the same + format as gt_occluded. + pred_tracks: An array of track predictions from your algorithm, in the + same format as gt_tracks. + query_mode: Either 'first' or 'strided', depending on how queries are + sampled. If 'first', we assume the prior knowledge that all points + before the query point are occluded, and these are removed from the + evaluation. + Returns: + A dict with the following keys: + occlusion_accuracy: Accuracy at predicting occlusion. + pts_within_{x} for x in [1, 2, 4, 8, 16]: Fraction of points + predicted to be within the given pixel threshold, ignoring occlusion + prediction. + jaccard_{x} for x in [1, 2, 4, 8, 16]: Jaccard metric for the given + threshold + average_pts_within_thresh: average across pts_within_{x} + average_jaccard: average across jaccard_{x} + """ + + metrics = {} + # Fixed bug is described in: + # https://github.com/facebookresearch/co-tracker/issues/20 + eye = np.eye(gt_tracks.shape[2], dtype=np.int32) + + if query_mode == "first": + # evaluate frames after the query frame + query_frame_to_eval_frames = np.cumsum(eye, axis=1) - eye + elif query_mode == "strided": + # evaluate all frames except the query frame + query_frame_to_eval_frames = 1 - eye + else: + raise ValueError("Unknown query mode " + query_mode) + + query_frame = query_points[..., 0] + query_frame = np.round(query_frame).astype(np.int32) + evaluation_points = query_frame_to_eval_frames[query_frame] > 0 + + # Occlusion accuracy is simply how often the predicted occlusion equals the + # ground truth. + occ_acc = np.sum( + np.equal(pred_occluded, gt_occluded) & evaluation_points, + axis=(1, 2), + ) / np.sum(evaluation_points) + metrics["occlusion_accuracy"] = occ_acc + + # Next, convert the predictions and ground truth positions into pixel + # coordinates. 
+ visible = np.logical_not(gt_occluded) + pred_visible = np.logical_not(pred_occluded) + all_frac_within = [] + all_jaccard = [] + for thresh in [1, 2, 4, 8, 16]: + # True positives are points that are within the threshold and where both + # the prediction and the ground truth are listed as visible. + within_dist = np.sum( + np.square(pred_tracks - gt_tracks), + axis=-1, + ) < np.square(thresh) + is_correct = np.logical_and(within_dist, visible) + + # Compute the frac_within_threshold, which is the fraction of points + # within the threshold among points that are visible in the ground truth, + # ignoring whether they're predicted to be visible. + count_correct = np.sum( + is_correct & evaluation_points, + axis=(1, 2), + ) + count_visible_points = np.sum(visible & evaluation_points, axis=(1, 2)) + frac_correct = count_correct / count_visible_points + metrics["pts_within_" + str(thresh)] = frac_correct + all_frac_within.append(frac_correct) + + true_positives = np.sum( + is_correct & pred_visible & evaluation_points, axis=(1, 2) + ) + + # The denominator of the jaccard metric is the true positives plus + # false positives plus false negatives. However, note that true positives + # plus false negatives is simply the number of points in the ground truth + # which is easier to compute than trying to compute all three quantities. + # Thus we just add the number of points in the ground truth to the number + # of false positives. + # + # False positives are simply points that are predicted to be visible, + # but the ground truth is not visible or too far from the prediction. + gt_positives = np.sum(visible & evaluation_points, axis=(1, 2)) + false_positives = (~visible) & pred_visible + false_positives = false_positives | ((~within_dist) & pred_visible) + false_positives = np.sum(false_positives & evaluation_points, axis=(1, 2)) + jaccard = true_positives / (gt_positives + false_positives) + metrics["jaccard_" + str(thresh)] = jaccard + all_jaccard.append(jaccard) + metrics["average_jaccard"] = np.mean( + np.stack(all_jaccard, axis=1), + axis=1, + ) + metrics["average_pts_within_thresh"] = np.mean( + np.stack(all_frac_within, axis=1), + axis=1, + ) + return metrics diff --git a/cotracker/evaluation/core/evaluator.py b/cotracker/evaluation/core/evaluator.py index ffc697e..d8487e6 100644 --- a/cotracker/evaluation/core/evaluator.py +++ b/cotracker/evaluation/core/evaluator.py @@ -1,253 +1,253 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -from collections import defaultdict -import os -from typing import Optional -import torch -from tqdm import tqdm -import numpy as np - -from torch.utils.tensorboard import SummaryWriter -from cotracker.datasets.utils import dataclass_to_cuda_ -from cotracker.utils.visualizer import Visualizer -from cotracker.models.core.model_utils import reduce_masked_mean -from cotracker.evaluation.core.eval_utils import compute_tapvid_metrics - -import logging - - -class Evaluator: - """ - A class defining the CoTracker evaluator. 
- """ - - def __init__(self, exp_dir) -> None: - # Visualization - self.exp_dir = exp_dir - os.makedirs(exp_dir, exist_ok=True) - self.visualization_filepaths = defaultdict(lambda: defaultdict(list)) - self.visualize_dir = os.path.join(exp_dir, "visualisations") - - def compute_metrics(self, metrics, sample, pred_trajectory, dataset_name): - if isinstance(pred_trajectory, tuple): - pred_trajectory, pred_visibility = pred_trajectory - else: - pred_visibility = None - if "tapvid" in dataset_name: - B, T, N, D = sample.trajectory.shape - traj = sample.trajectory.clone() - thr = 0.9 - - if pred_visibility is None: - logging.warning("visibility is NONE") - pred_visibility = torch.zeros_like(sample.visibility) - - if not pred_visibility.dtype == torch.bool: - pred_visibility = pred_visibility > thr - - query_points = sample.query_points.clone().cpu().numpy() - - pred_visibility = pred_visibility[:, :, :N] - pred_trajectory = pred_trajectory[:, :, :N] - - gt_tracks = traj.permute(0, 2, 1, 3).cpu().numpy() - gt_occluded = ( - torch.logical_not(sample.visibility.clone().permute(0, 2, 1)).cpu().numpy() - ) - - pred_occluded = ( - torch.logical_not(pred_visibility.clone().permute(0, 2, 1)).cpu().numpy() - ) - pred_tracks = pred_trajectory.permute(0, 2, 1, 3).cpu().numpy() - - out_metrics = compute_tapvid_metrics( - query_points, - gt_occluded, - gt_tracks, - pred_occluded, - pred_tracks, - query_mode="strided" if "strided" in dataset_name else "first", - ) - - metrics[sample.seq_name[0]] = out_metrics - for metric_name in out_metrics.keys(): - if "avg" not in metrics: - metrics["avg"] = {} - metrics["avg"][metric_name] = np.mean( - [v[metric_name] for k, v in metrics.items() if k != "avg"] - ) - - logging.info(f"Metrics: {out_metrics}") - logging.info(f"avg: {metrics['avg']}") - print("metrics", out_metrics) - print("avg", metrics["avg"]) - elif dataset_name == "dynamic_replica" or dataset_name == "pointodyssey": - *_, N, _ = sample.trajectory.shape - B, T, N = sample.visibility.shape - H, W = sample.video.shape[-2:] - device = sample.video.device - - out_metrics = {} - - d_vis_sum = d_occ_sum = d_sum_all = 0.0 - thrs = [1, 2, 4, 8, 16] - sx_ = (W - 1) / 255.0 - sy_ = (H - 1) / 255.0 - sc_py = np.array([sx_, sy_]).reshape([1, 1, 2]) - sc_pt = torch.from_numpy(sc_py).float().to(device) - __, first_visible_inds = torch.max(sample.visibility, dim=1) - - frame_ids_tensor = torch.arange(T, device=device)[None, :, None].repeat(B, 1, N) - start_tracking_mask = frame_ids_tensor > (first_visible_inds.unsqueeze(1)) - - for thr in thrs: - d_ = ( - torch.norm( - pred_trajectory[..., :2] / sc_pt - sample.trajectory[..., :2] / sc_pt, - dim=-1, - ) - < thr - ).float() # B,S-1,N - d_occ = ( - reduce_masked_mean(d_, (1 - sample.visibility) * start_tracking_mask).item() - * 100.0 - ) - d_occ_sum += d_occ - out_metrics[f"accuracy_occ_{thr}"] = d_occ - - d_vis = ( - reduce_masked_mean(d_, sample.visibility * start_tracking_mask).item() * 100.0 - ) - d_vis_sum += d_vis - out_metrics[f"accuracy_vis_{thr}"] = d_vis - - d_all = reduce_masked_mean(d_, start_tracking_mask).item() * 100.0 - d_sum_all += d_all - out_metrics[f"accuracy_{thr}"] = d_all - - d_occ_avg = d_occ_sum / len(thrs) - d_vis_avg = d_vis_sum / len(thrs) - d_all_avg = d_sum_all / len(thrs) - - sur_thr = 50 - dists = torch.norm( - pred_trajectory[..., :2] / sc_pt - sample.trajectory[..., :2] / sc_pt, - dim=-1, - ) # B,S,N - dist_ok = 1 - (dists > sur_thr).float() * sample.visibility # B,S,N - survival = torch.cumprod(dist_ok, dim=1) # B,S,N - 
out_metrics["survival"] = torch.mean(survival).item() * 100.0 - - out_metrics["accuracy_occ"] = d_occ_avg - out_metrics["accuracy_vis"] = d_vis_avg - out_metrics["accuracy"] = d_all_avg - - metrics[sample.seq_name[0]] = out_metrics - for metric_name in out_metrics.keys(): - if "avg" not in metrics: - metrics["avg"] = {} - metrics["avg"][metric_name] = float( - np.mean([v[metric_name] for k, v in metrics.items() if k != "avg"]) - ) - - logging.info(f"Metrics: {out_metrics}") - logging.info(f"avg: {metrics['avg']}") - print("metrics", out_metrics) - print("avg", metrics["avg"]) - - @torch.no_grad() - def evaluate_sequence( - self, - model, - test_dataloader: torch.utils.data.DataLoader, - dataset_name: str, - train_mode=False, - visualize_every: int = 1, - writer: Optional[SummaryWriter] = None, - step: Optional[int] = 0, - ): - metrics = {} - - vis = Visualizer( - save_dir=self.exp_dir, - fps=7, - ) - - for ind, sample in enumerate(tqdm(test_dataloader)): - if isinstance(sample, tuple): - sample, gotit = sample - if not all(gotit): - print("batch is None") - continue - if torch.cuda.is_available(): - dataclass_to_cuda_(sample) - device = torch.device("cuda") - else: - device = torch.device("cpu") - - if ( - not train_mode - and hasattr(model, "sequence_len") - and (sample.visibility[:, : model.sequence_len].sum() == 0) - ): - print(f"skipping batch {ind}") - continue - - if "tapvid" in dataset_name: - queries = sample.query_points.clone().float() - - queries = torch.stack( - [ - queries[:, :, 0], - queries[:, :, 2], - queries[:, :, 1], - ], - dim=2, - ).to(device) - else: - queries = torch.cat( - [ - torch.zeros_like(sample.trajectory[:, 0, :, :1]), - sample.trajectory[:, 0], - ], - dim=2, - ).to(device) - - pred_tracks = model(sample.video, queries) - if "strided" in dataset_name: - inv_video = sample.video.flip(1).clone() - inv_queries = queries.clone() - inv_queries[:, :, 0] = inv_video.shape[1] - inv_queries[:, :, 0] - 1 - - pred_trj, pred_vsb = pred_tracks - inv_pred_trj, inv_pred_vsb = model(inv_video, inv_queries) - - inv_pred_trj = inv_pred_trj.flip(1) - inv_pred_vsb = inv_pred_vsb.flip(1) - - mask = pred_trj == 0 - - pred_trj[mask] = inv_pred_trj[mask] - pred_vsb[mask[:, :, :, 0]] = inv_pred_vsb[mask[:, :, :, 0]] - - pred_tracks = pred_trj, pred_vsb - - if dataset_name == "badja" or dataset_name == "fastcapture": - seq_name = sample.seq_name[0] - else: - seq_name = str(ind) - if ind % visualize_every == 0: - vis.visualize( - sample.video, - pred_tracks[0] if isinstance(pred_tracks, tuple) else pred_tracks, - filename=dataset_name + "_" + seq_name, - writer=writer, - step=step, - ) - - self.compute_metrics(metrics, sample, pred_tracks, dataset_name) - return metrics +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +from collections import defaultdict +import os +from typing import Optional +import torch +from tqdm import tqdm +import numpy as np + +from torch.utils.tensorboard import SummaryWriter +from cotracker.datasets.utils import dataclass_to_cuda_ +from cotracker.utils.visualizer import Visualizer +from cotracker.models.core.model_utils import reduce_masked_mean +from cotracker.evaluation.core.eval_utils import compute_tapvid_metrics + +import logging + + +class Evaluator: + """ + A class defining the CoTracker evaluator. 
+ """ + + def __init__(self, exp_dir) -> None: + # Visualization + self.exp_dir = exp_dir + os.makedirs(exp_dir, exist_ok=True) + self.visualization_filepaths = defaultdict(lambda: defaultdict(list)) + self.visualize_dir = os.path.join(exp_dir, "visualisations") + + def compute_metrics(self, metrics, sample, pred_trajectory, dataset_name): + if isinstance(pred_trajectory, tuple): + pred_trajectory, pred_visibility = pred_trajectory + else: + pred_visibility = None + if "tapvid" in dataset_name: + B, T, N, D = sample.trajectory.shape + traj = sample.trajectory.clone() + thr = 0.9 + + if pred_visibility is None: + logging.warning("visibility is NONE") + pred_visibility = torch.zeros_like(sample.visibility) + + if not pred_visibility.dtype == torch.bool: + pred_visibility = pred_visibility > thr + + query_points = sample.query_points.clone().cpu().numpy() + + pred_visibility = pred_visibility[:, :, :N] + pred_trajectory = pred_trajectory[:, :, :N] + + gt_tracks = traj.permute(0, 2, 1, 3).cpu().numpy() + gt_occluded = ( + torch.logical_not(sample.visibility.clone().permute(0, 2, 1)).cpu().numpy() + ) + + pred_occluded = ( + torch.logical_not(pred_visibility.clone().permute(0, 2, 1)).cpu().numpy() + ) + pred_tracks = pred_trajectory.permute(0, 2, 1, 3).cpu().numpy() + + out_metrics = compute_tapvid_metrics( + query_points, + gt_occluded, + gt_tracks, + pred_occluded, + pred_tracks, + query_mode="strided" if "strided" in dataset_name else "first", + ) + + metrics[sample.seq_name[0]] = out_metrics + for metric_name in out_metrics.keys(): + if "avg" not in metrics: + metrics["avg"] = {} + metrics["avg"][metric_name] = np.mean( + [v[metric_name] for k, v in metrics.items() if k != "avg"] + ) + + logging.info(f"Metrics: {out_metrics}") + logging.info(f"avg: {metrics['avg']}") + print("metrics", out_metrics) + print("avg", metrics["avg"]) + elif dataset_name == "dynamic_replica" or dataset_name == "pointodyssey": + *_, N, _ = sample.trajectory.shape + B, T, N = sample.visibility.shape + H, W = sample.video.shape[-2:] + device = sample.video.device + + out_metrics = {} + + d_vis_sum = d_occ_sum = d_sum_all = 0.0 + thrs = [1, 2, 4, 8, 16] + sx_ = (W - 1) / 255.0 + sy_ = (H - 1) / 255.0 + sc_py = np.array([sx_, sy_]).reshape([1, 1, 2]) + sc_pt = torch.from_numpy(sc_py).float().to(device) + __, first_visible_inds = torch.max(sample.visibility, dim=1) + + frame_ids_tensor = torch.arange(T, device=device)[None, :, None].repeat(B, 1, N) + start_tracking_mask = frame_ids_tensor > (first_visible_inds.unsqueeze(1)) + + for thr in thrs: + d_ = ( + torch.norm( + pred_trajectory[..., :2] / sc_pt - sample.trajectory[..., :2] / sc_pt, + dim=-1, + ) + < thr + ).float() # B,S-1,N + d_occ = ( + reduce_masked_mean(d_, (1 - sample.visibility) * start_tracking_mask).item() + * 100.0 + ) + d_occ_sum += d_occ + out_metrics[f"accuracy_occ_{thr}"] = d_occ + + d_vis = ( + reduce_masked_mean(d_, sample.visibility * start_tracking_mask).item() * 100.0 + ) + d_vis_sum += d_vis + out_metrics[f"accuracy_vis_{thr}"] = d_vis + + d_all = reduce_masked_mean(d_, start_tracking_mask).item() * 100.0 + d_sum_all += d_all + out_metrics[f"accuracy_{thr}"] = d_all + + d_occ_avg = d_occ_sum / len(thrs) + d_vis_avg = d_vis_sum / len(thrs) + d_all_avg = d_sum_all / len(thrs) + + sur_thr = 50 + dists = torch.norm( + pred_trajectory[..., :2] / sc_pt - sample.trajectory[..., :2] / sc_pt, + dim=-1, + ) # B,S,N + dist_ok = 1 - (dists > sur_thr).float() * sample.visibility # B,S,N + survival = torch.cumprod(dist_ok, dim=1) # B,S,N + 
out_metrics["survival"] = torch.mean(survival).item() * 100.0 + + out_metrics["accuracy_occ"] = d_occ_avg + out_metrics["accuracy_vis"] = d_vis_avg + out_metrics["accuracy"] = d_all_avg + + metrics[sample.seq_name[0]] = out_metrics + for metric_name in out_metrics.keys(): + if "avg" not in metrics: + metrics["avg"] = {} + metrics["avg"][metric_name] = float( + np.mean([v[metric_name] for k, v in metrics.items() if k != "avg"]) + ) + + logging.info(f"Metrics: {out_metrics}") + logging.info(f"avg: {metrics['avg']}") + print("metrics", out_metrics) + print("avg", metrics["avg"]) + + @torch.no_grad() + def evaluate_sequence( + self, + model, + test_dataloader: torch.utils.data.DataLoader, + dataset_name: str, + train_mode=False, + visualize_every: int = 1, + writer: Optional[SummaryWriter] = None, + step: Optional[int] = 0, + ): + metrics = {} + + vis = Visualizer( + save_dir=self.exp_dir, + fps=7, + ) + + for ind, sample in enumerate(tqdm(test_dataloader)): + if isinstance(sample, tuple): + sample, gotit = sample + if not all(gotit): + print("batch is None") + continue + if torch.cuda.is_available(): + dataclass_to_cuda_(sample) + device = torch.device("cuda") + else: + device = torch.device("cpu") + + if ( + not train_mode + and hasattr(model, "sequence_len") + and (sample.visibility[:, : model.sequence_len].sum() == 0) + ): + print(f"skipping batch {ind}") + continue + + if "tapvid" in dataset_name: + queries = sample.query_points.clone().float() + + queries = torch.stack( + [ + queries[:, :, 0], + queries[:, :, 2], + queries[:, :, 1], + ], + dim=2, + ).to(device) + else: + queries = torch.cat( + [ + torch.zeros_like(sample.trajectory[:, 0, :, :1]), + sample.trajectory[:, 0], + ], + dim=2, + ).to(device) + + pred_tracks = model(sample.video, queries) + if "strided" in dataset_name: + inv_video = sample.video.flip(1).clone() + inv_queries = queries.clone() + inv_queries[:, :, 0] = inv_video.shape[1] - inv_queries[:, :, 0] - 1 + + pred_trj, pred_vsb = pred_tracks + inv_pred_trj, inv_pred_vsb = model(inv_video, inv_queries) + + inv_pred_trj = inv_pred_trj.flip(1) + inv_pred_vsb = inv_pred_vsb.flip(1) + + mask = pred_trj == 0 + + pred_trj[mask] = inv_pred_trj[mask] + pred_vsb[mask[:, :, :, 0]] = inv_pred_vsb[mask[:, :, :, 0]] + + pred_tracks = pred_trj, pred_vsb + + if dataset_name == "badja" or dataset_name == "fastcapture": + seq_name = sample.seq_name[0] + else: + seq_name = str(ind) + if ind % visualize_every == 0: + vis.visualize( + sample.video, + pred_tracks[0] if isinstance(pred_tracks, tuple) else pred_tracks, + filename=dataset_name + "_" + seq_name, + writer=writer, + step=step, + ) + + self.compute_metrics(metrics, sample, pred_tracks, dataset_name) + return metrics diff --git a/cotracker/evaluation/evaluate.py b/cotracker/evaluation/evaluate.py index 5d679d2..f12248d 100644 --- a/cotracker/evaluation/evaluate.py +++ b/cotracker/evaluation/evaluate.py @@ -1,169 +1,169 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
- -import json -import os -from dataclasses import dataclass, field - -import hydra -import numpy as np - -import torch -from omegaconf import OmegaConf - -from cotracker.datasets.tap_vid_datasets import TapVidDataset -from cotracker.datasets.dr_dataset import DynamicReplicaDataset -from cotracker.datasets.utils import collate_fn - -from cotracker.models.evaluation_predictor import EvaluationPredictor - -from cotracker.evaluation.core.evaluator import Evaluator -from cotracker.models.build_cotracker import ( - build_cotracker, -) - - -@dataclass(eq=False) -class DefaultConfig: - # Directory where all outputs of the experiment will be saved. - exp_dir: str = "./outputs" - - # Name of the dataset to be used for the evaluation. - dataset_name: str = "tapvid_davis_first" - # The root directory of the dataset. - dataset_root: str = "./" - - # Path to the pre-trained model checkpoint to be used for the evaluation. - # The default value is the path to a specific CoTracker model checkpoint. - checkpoint: str = "./checkpoints/cotracker2.pth" - - # EvaluationPredictor parameters - # The size (N) of the support grid used in the predictor. - # The total number of points is (N*N). - grid_size: int = 5 - # The size (N) of the local support grid. - local_grid_size: int = 8 - # A flag indicating whether to evaluate one ground truth point at a time. - single_point: bool = True - # The number of iterative updates for each sliding window. - n_iters: int = 6 - - seed: int = 0 - gpu_idx: int = 0 - - # Override hydra's working directory to current working dir, - # also disable storing the .hydra logs: - hydra: dict = field( - default_factory=lambda: { - "run": {"dir": "."}, - "output_subdir": None, - } - ) - - -def run_eval(cfg: DefaultConfig): - """ - The function evaluates CoTracker on a specified benchmark dataset based on a provided configuration. - - Args: - cfg (DefaultConfig): An instance of DefaultConfig class which includes: - - exp_dir (str): The directory path for the experiment. - - dataset_name (str): The name of the dataset to be used. - - dataset_root (str): The root directory of the dataset. - - checkpoint (str): The path to the CoTracker model's checkpoint. - - single_point (bool): A flag indicating whether to evaluate one ground truth point at a time. - - n_iters (int): The number of iterative updates for each sliding window. - - seed (int): The seed for setting the random state for reproducibility. - - gpu_idx (int): The index of the GPU to be used. 
- """ - # Creating the experiment directory if it doesn't exist - os.makedirs(cfg.exp_dir, exist_ok=True) - - # Saving the experiment configuration to a .yaml file in the experiment directory - cfg_file = os.path.join(cfg.exp_dir, "expconfig.yaml") - with open(cfg_file, "w") as f: - OmegaConf.save(config=cfg, f=f) - - evaluator = Evaluator(cfg.exp_dir) - cotracker_model = build_cotracker(cfg.checkpoint) - - # Creating the EvaluationPredictor object - predictor = EvaluationPredictor( - cotracker_model, - grid_size=cfg.grid_size, - local_grid_size=cfg.local_grid_size, - single_point=cfg.single_point, - n_iters=cfg.n_iters, - ) - if torch.cuda.is_available(): - predictor.model = predictor.model.cuda() - - # Setting the random seeds - torch.manual_seed(cfg.seed) - np.random.seed(cfg.seed) - - # Constructing the specified dataset - curr_collate_fn = collate_fn - if "tapvid" in cfg.dataset_name: - dataset_type = cfg.dataset_name.split("_")[1] - if dataset_type == "davis": - data_root = os.path.join(cfg.dataset_root, "tapvid_davis", "tapvid_davis.pkl") - elif dataset_type == "kinetics": - data_root = os.path.join( - cfg.dataset_root, "/kinetics/kinetics-dataset/k700-2020/tapvid_kinetics" - ) - test_dataset = TapVidDataset( - dataset_type=dataset_type, - data_root=data_root, - queried_first=not "strided" in cfg.dataset_name, - ) - elif cfg.dataset_name == "dynamic_replica": - test_dataset = DynamicReplicaDataset(sample_len=300, only_first_n_samples=1) - - # Creating the DataLoader object - test_dataloader = torch.utils.data.DataLoader( - test_dataset, - batch_size=1, - shuffle=False, - num_workers=14, - collate_fn=curr_collate_fn, - ) - - # Timing and conducting the evaluation - import time - - start = time.time() - evaluate_result = evaluator.evaluate_sequence( - predictor, - test_dataloader, - dataset_name=cfg.dataset_name, - ) - end = time.time() - print(end - start) - - # Saving the evaluation results to a .json file - evaluate_result = evaluate_result["avg"] - print("evaluate_result", evaluate_result) - result_file = os.path.join(cfg.exp_dir, f"result_eval_.json") - evaluate_result["time"] = end - start - print(f"Dumping eval results to {result_file}.") - with open(result_file, "w") as f: - json.dump(evaluate_result, f) - - -cs = hydra.core.config_store.ConfigStore.instance() -cs.store(name="default_config_eval", node=DefaultConfig) - - -@hydra.main(config_path="./configs/", config_name="default_config_eval") -def evaluate(cfg: DefaultConfig) -> None: - os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" - os.environ["CUDA_VISIBLE_DEVICES"] = str(cfg.gpu_idx) - run_eval(cfg) - - -if __name__ == "__main__": - evaluate() +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import json +import os +from dataclasses import dataclass, field + +import hydra +import numpy as np + +import torch +from omegaconf import OmegaConf + +from cotracker.datasets.tap_vid_datasets import TapVidDataset +from cotracker.datasets.dr_dataset import DynamicReplicaDataset +from cotracker.datasets.utils import collate_fn + +from cotracker.models.evaluation_predictor import EvaluationPredictor + +from cotracker.evaluation.core.evaluator import Evaluator +from cotracker.models.build_cotracker import ( + build_cotracker, +) + + +@dataclass(eq=False) +class DefaultConfig: + # Directory where all outputs of the experiment will be saved. 
+ exp_dir: str = "./outputs" + + # Name of the dataset to be used for the evaluation. + dataset_name: str = "tapvid_davis_first" + # The root directory of the dataset. + dataset_root: str = "./" + + # Path to the pre-trained model checkpoint to be used for the evaluation. + # The default value is the path to a specific CoTracker model checkpoint. + checkpoint: str = "./checkpoints/cotracker2.pth" + + # EvaluationPredictor parameters + # The size (N) of the support grid used in the predictor. + # The total number of points is (N*N). + grid_size: int = 5 + # The size (N) of the local support grid. + local_grid_size: int = 8 + # A flag indicating whether to evaluate one ground truth point at a time. + single_point: bool = True + # The number of iterative updates for each sliding window. + n_iters: int = 6 + + seed: int = 0 + gpu_idx: int = 0 + + # Override hydra's working directory to current working dir, + # also disable storing the .hydra logs: + hydra: dict = field( + default_factory=lambda: { + "run": {"dir": "."}, + "output_subdir": None, + } + ) + + +def run_eval(cfg: DefaultConfig): + """ + The function evaluates CoTracker on a specified benchmark dataset based on a provided configuration. + + Args: + cfg (DefaultConfig): An instance of DefaultConfig class which includes: + - exp_dir (str): The directory path for the experiment. + - dataset_name (str): The name of the dataset to be used. + - dataset_root (str): The root directory of the dataset. + - checkpoint (str): The path to the CoTracker model's checkpoint. + - single_point (bool): A flag indicating whether to evaluate one ground truth point at a time. + - n_iters (int): The number of iterative updates for each sliding window. + - seed (int): The seed for setting the random state for reproducibility. + - gpu_idx (int): The index of the GPU to be used. 
+ """ + # Creating the experiment directory if it doesn't exist + os.makedirs(cfg.exp_dir, exist_ok=True) + + # Saving the experiment configuration to a .yaml file in the experiment directory + cfg_file = os.path.join(cfg.exp_dir, "expconfig.yaml") + with open(cfg_file, "w") as f: + OmegaConf.save(config=cfg, f=f) + + evaluator = Evaluator(cfg.exp_dir) + cotracker_model = build_cotracker(cfg.checkpoint) + + # Creating the EvaluationPredictor object + predictor = EvaluationPredictor( + cotracker_model, + grid_size=cfg.grid_size, + local_grid_size=cfg.local_grid_size, + single_point=cfg.single_point, + n_iters=cfg.n_iters, + ) + if torch.cuda.is_available(): + predictor.model = predictor.model.cuda() + + # Setting the random seeds + torch.manual_seed(cfg.seed) + np.random.seed(cfg.seed) + + # Constructing the specified dataset + curr_collate_fn = collate_fn + if "tapvid" in cfg.dataset_name: + dataset_type = cfg.dataset_name.split("_")[1] + if dataset_type == "davis": + data_root = os.path.join(cfg.dataset_root, "tapvid_davis", "tapvid_davis.pkl") + elif dataset_type == "kinetics": + data_root = os.path.join( + cfg.dataset_root, "/kinetics/kinetics-dataset/k700-2020/tapvid_kinetics" + ) + test_dataset = TapVidDataset( + dataset_type=dataset_type, + data_root=data_root, + queried_first=not "strided" in cfg.dataset_name, + ) + elif cfg.dataset_name == "dynamic_replica": + test_dataset = DynamicReplicaDataset(sample_len=300, only_first_n_samples=1) + + # Creating the DataLoader object + test_dataloader = torch.utils.data.DataLoader( + test_dataset, + batch_size=1, + shuffle=False, + num_workers=14, + collate_fn=curr_collate_fn, + ) + + # Timing and conducting the evaluation + import time + + start = time.time() + evaluate_result = evaluator.evaluate_sequence( + predictor, + test_dataloader, + dataset_name=cfg.dataset_name, + ) + end = time.time() + print(end - start) + + # Saving the evaluation results to a .json file + evaluate_result = evaluate_result["avg"] + print("evaluate_result", evaluate_result) + result_file = os.path.join(cfg.exp_dir, f"result_eval_.json") + evaluate_result["time"] = end - start + print(f"Dumping eval results to {result_file}.") + with open(result_file, "w") as f: + json.dump(evaluate_result, f) + + +cs = hydra.core.config_store.ConfigStore.instance() +cs.store(name="default_config_eval", node=DefaultConfig) + + +@hydra.main(config_path="./configs/", config_name="default_config_eval") +def evaluate(cfg: DefaultConfig) -> None: + os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" + os.environ["CUDA_VISIBLE_DEVICES"] = str(cfg.gpu_idx) + run_eval(cfg) + + +if __name__ == "__main__": + evaluate() diff --git a/cotracker/models/__init__.py b/cotracker/models/__init__.py index 5277f46..4547e07 100644 --- a/cotracker/models/__init__.py +++ b/cotracker/models/__init__.py @@ -1,5 +1,5 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
(binary patches for cotracker/models/__pycache__/__init__.cpython-3{8,9}.pyc and build_cotracker.cpython-3{8,9}.pyc omitted)
diff --git a/cotracker/models/build_cotracker.py b/cotracker/models/build_cotracker.py
index 1ae5f90..1448670 100644
--- a/cotracker/models/build_cotracker.py
+++ b/cotracker/models/build_cotracker.py
@@ -1,33 +1,33 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
- -import torch - -from cotracker.models.core.cotracker.cotracker import CoTracker2 - - -def build_cotracker( - checkpoint: str, -): - if checkpoint is None: - return build_cotracker() - model_name = checkpoint.split("/")[-1].split(".")[0] - if model_name == "cotracker": - return build_cotracker(checkpoint=checkpoint) - else: - raise ValueError(f"Unknown model name {model_name}") - - -def build_cotracker(checkpoint=None): - cotracker = CoTracker2(stride=4, window_len=8, add_space_attn=True) - - if checkpoint is not None: - with open(checkpoint, "rb") as f: - state_dict = torch.load(f, map_location="cpu") - if "model" in state_dict: - state_dict = state_dict["model"] - cotracker.load_state_dict(state_dict) - return cotracker +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import torch + +from cotracker.models.core.cotracker.cotracker import CoTracker2 + + +def build_cotracker( + checkpoint: str, +): + if checkpoint is None: + return build_cotracker() + model_name = checkpoint.split("/")[-1].split(".")[0] + if model_name == "cotracker": + return build_cotracker(checkpoint=checkpoint) + else: + raise ValueError(f"Unknown model name {model_name}") + + +def build_cotracker(checkpoint=None): + cotracker = CoTracker2(stride=4, window_len=8, add_space_attn=True) + + if checkpoint is not None: + with open(checkpoint, "rb") as f: + state_dict = torch.load(f, map_location="cpu") + if "model" in state_dict: + state_dict = state_dict["model"] + cotracker.load_state_dict(state_dict) + return cotracker diff --git a/cotracker/models/core/__init__.py b/cotracker/models/core/__init__.py index 5277f46..4547e07 100644 --- a/cotracker/models/core/__init__.py +++ b/cotracker/models/core/__init__.py @@ -1,5 +1,5 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
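Because Python rebinds a name when a module defines it twice, the second def build_cotracker(checkpoint=None) above shadows the first one at import time, so a call goes straight to the loader: it instantiates CoTracker2(stride=4, window_len=8, add_space_attn=True) and, if a path is given, loads the state dict on CPU. A minimal usage sketch with a hypothetical checkpoint path:

    import torch
    from cotracker.models.build_cotracker import build_cotracker

    # "./checkpoints/cotracker2.pth" is a placeholder for the downloaded CoTracker2 weights.
    model = build_cotracker("./checkpoints/cotracker2.pth")
    if torch.cuda.is_available():
        model = model.cuda()
    model.eval()  # standard inference-mode setup before tracking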
(binary patches for cotracker/models/core/__pycache__/__init__, embeddings and model_utils .cpython-3{8,9}.pyc omitted)
diff --git a/cotracker/models/core/cotracker/__init__.py b/cotracker/models/core/cotracker/__init__.py
index 5277f46..4547e07 100644
--- a/cotracker/models/core/cotracker/__init__.py
+++ b/cotracker/models/core/cotracker/__init__.py
@@ -1,5 +1,5 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
(binary patches for cotracker/models/core/cotracker/__pycache__/__init__, blocks and cotracker .cpython-3{8,9}.pyc omitted)
zNw$l3Q`^}%JPH{SGlnQ9z0O1jag;IlQ3&~-3&l&i2ID8h} z?duQ$3WdhImiwqG&w><0Mw0t?uiFwZiYD zpa$J2su6&r2CSoQtK!`ViLnj>M&LmR*ikUQljX_=Lk*Ra%PR|al6-t}SrHv{hdi{} z@WxWG2zvLJM&c-rF4dOVR~v}42xr4x&7 zNX#HV#ti0``#~TZYtg4yioNAHds-RhZeR}&St0g6nxh~oocu*xB6C$rSQ!A42B$cW zGmPV*bdm^Pe3Aaj;_FoY0)gKpAkC;1O38Q;8BXF42>c#_TLkDP#ti27DfJ?OV+6iM z;F|=#0Z^ZfZJ|QKd!>s?&Kem(e;rwA{74ZkK8Fr=G3Aj-^;Hx`tP`;|VuSS7B`&uM zF<(t{G4G6XcsYV_;{BK5OBm=c=rWu|LV+G%gl=C3BZKzO5X*yIV6bJx|7v@xw#=5H z%^Q0Ltb_Q!4Bg(6`v2C^gD+8Hfz)$Sjf}ysVXBEPDZWD4i7p9~yKagvQ{gWY_!WRC z>o~mMgqBTQTF@1iI=J{%N=+`fWa*NdlhV6M8zVtrGTj*K+o=xzpT)CgPAbPuG}-r& zoB<{NDS)xoZAy(BPb}nns$aXIq}rNieCr&X8)dia_V9{_d|pli4~RTB z(%gZ61&;v`qxJ&#)%Yb6Nz=%@vA*e$vibn#G*nA@RFGTj$Zg%B$AsEyq(iV=2anE! zM~nWe(#Qa(mV)MKw4XZ2F}se;v)xAT8gHEYK2B5IM6<2>eqHQ9jWg~eFE|36c^Js) z+eLded1N&%$TgaQ(`~#46-VcyKd2Wb%2ezI$-PLm^3_v@UOl}lD$pN%2of_=u|}Yu zR76L<9+R!kkd+?@k`eIJbO#`7DV3x$_LsC_shf~mVi%-l`y(<3Un!erD#P2W>-Rbi~}nR;yIPpR%p55eGSGbMc*i|BPM z;J$+lF&<@o5_Wg8 B S C (H W) - if self.multiple_track_feats: - fmap1 = targets_split[i] - corrs = torch.matmul(fmap1, fmap2s) - corrs = corrs.view(B, S, N, H, W) # B S N (H W) -> B S N H W - corrs = corrs / torch.sqrt(torch.tensor(C).float()) - self.corrs_pyramid.append(corrs) - - -class Attention(nn.Module): - def __init__(self, query_dim, context_dim=None, num_heads=8, dim_head=48, qkv_bias=False): - super().__init__() - inner_dim = dim_head * num_heads - context_dim = default(context_dim, query_dim) - self.scale = dim_head**-0.5 - self.heads = num_heads - - self.to_q = nn.Linear(query_dim, inner_dim, bias=qkv_bias) - self.to_kv = nn.Linear(context_dim, inner_dim * 2, bias=qkv_bias) - self.to_out = nn.Linear(inner_dim, query_dim) - - def forward(self, x, context=None, attn_bias=None): - B, N1, C = x.shape - h = self.heads - - q = self.to_q(x).reshape(B, N1, h, C // h).permute(0, 2, 1, 3) - context = default(context, x) - k, v = self.to_kv(context).chunk(2, dim=-1) - - N2 = context.shape[1] - k = k.reshape(B, N2, h, C // h).permute(0, 2, 1, 3) - v = v.reshape(B, N2, h, C // h).permute(0, 2, 1, 3) - - sim = (q @ k.transpose(-2, -1)) * self.scale - - if attn_bias is not None: - sim = sim + attn_bias - attn = sim.softmax(dim=-1) - - x = (attn @ v).transpose(1, 2).reshape(B, N1, C) - return self.to_out(x) - - -class AttnBlock(nn.Module): - def __init__( - self, - hidden_size, - num_heads, - attn_class: Callable[..., nn.Module] = Attention, - mlp_ratio=4.0, - **block_kwargs - ): - super().__init__() - self.norm1 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6) - self.attn = attn_class(hidden_size, num_heads=num_heads, qkv_bias=True, **block_kwargs) - - self.norm2 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6) - mlp_hidden_dim = int(hidden_size * mlp_ratio) - approx_gelu = lambda: nn.GELU(approximate="tanh") - self.mlp = Mlp( - in_features=hidden_size, - hidden_features=mlp_hidden_dim, - act_layer=approx_gelu, - drop=0, - ) - - def forward(self, x, mask=None): - attn_bias = mask - if mask is not None: - mask = ( - (mask[:, None] * mask[:, :, None]) - .unsqueeze(1) - .expand(-1, self.attn.num_heads, -1, -1) - ) - max_neg_value = -torch.finfo(x.dtype).max - attn_bias = (~mask) * max_neg_value - x = x + self.attn(self.norm1(x), attn_bias=attn_bias) - x = x + self.mlp(self.norm2(x)) - return x +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
+ +import torch +import torch.nn as nn +import torch.nn.functional as F +from functools import partial +from typing import Callable +import collections +from torch import Tensor +from itertools import repeat + +from cotracker.models.core.model_utils import bilinear_sampler + + +# From PyTorch internals +def _ntuple(n): + def parse(x): + if isinstance(x, collections.abc.Iterable) and not isinstance(x, str): + return tuple(x) + return tuple(repeat(x, n)) + + return parse + + +def exists(val): + return val is not None + + +def default(val, d): + return val if exists(val) else d + + +to_2tuple = _ntuple(2) + + +class Mlp(nn.Module): + """MLP as used in Vision Transformer, MLP-Mixer and related networks""" + + def __init__( + self, + in_features, + hidden_features=None, + out_features=None, + act_layer=nn.GELU, + norm_layer=None, + bias=True, + drop=0.0, + use_conv=False, + ): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + bias = to_2tuple(bias) + drop_probs = to_2tuple(drop) + linear_layer = partial(nn.Conv2d, kernel_size=1) if use_conv else nn.Linear + + self.fc1 = linear_layer(in_features, hidden_features, bias=bias[0]) + self.act = act_layer() + self.drop1 = nn.Dropout(drop_probs[0]) + self.norm = norm_layer(hidden_features) if norm_layer is not None else nn.Identity() + self.fc2 = linear_layer(hidden_features, out_features, bias=bias[1]) + self.drop2 = nn.Dropout(drop_probs[1]) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop1(x) + x = self.fc2(x) + x = self.drop2(x) + return x + + +class ResidualBlock(nn.Module): + def __init__(self, in_planes, planes, norm_fn="group", stride=1): + super(ResidualBlock, self).__init__() + + self.conv1 = nn.Conv2d( + in_planes, + planes, + kernel_size=3, + padding=1, + stride=stride, + padding_mode="zeros", + ) + self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1, padding_mode="zeros") + self.relu = nn.ReLU(inplace=True) + + num_groups = planes // 8 + + if norm_fn == "group": + self.norm1 = nn.GroupNorm(num_groups=num_groups, num_channels=planes) + self.norm2 = nn.GroupNorm(num_groups=num_groups, num_channels=planes) + if not stride == 1: + self.norm3 = nn.GroupNorm(num_groups=num_groups, num_channels=planes) + + elif norm_fn == "batch": + self.norm1 = nn.BatchNorm2d(planes) + self.norm2 = nn.BatchNorm2d(planes) + if not stride == 1: + self.norm3 = nn.BatchNorm2d(planes) + + elif norm_fn == "instance": + self.norm1 = nn.InstanceNorm2d(planes) + self.norm2 = nn.InstanceNorm2d(planes) + if not stride == 1: + self.norm3 = nn.InstanceNorm2d(planes) + + elif norm_fn == "none": + self.norm1 = nn.Sequential() + self.norm2 = nn.Sequential() + if not stride == 1: + self.norm3 = nn.Sequential() + + if stride == 1: + self.downsample = None + + else: + self.downsample = nn.Sequential( + nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride), self.norm3 + ) + + def forward(self, x): + y = x + y = self.relu(self.norm1(self.conv1(y))) + y = self.relu(self.norm2(self.conv2(y))) + + if self.downsample is not None: + x = self.downsample(x) + + return self.relu(x + y) + + +class BasicEncoder(nn.Module): + def __init__(self, input_dim=3, output_dim=128, stride=4): + super(BasicEncoder, self).__init__() + self.stride = stride + self.norm_fn = "instance" + self.in_planes = output_dim // 2 + + self.norm1 = nn.InstanceNorm2d(self.in_planes) + self.norm2 = nn.InstanceNorm2d(output_dim * 2) + + self.conv1 = nn.Conv2d( + input_dim, + self.in_planes, + kernel_size=7, + 
stride=2,
+            padding=3,
+            padding_mode="zeros",
+        )
+        self.relu1 = nn.ReLU(inplace=True)
+        self.layer1 = self._make_layer(output_dim // 2, stride=1)
+        self.layer2 = self._make_layer(output_dim // 4 * 3, stride=2)
+        self.layer3 = self._make_layer(output_dim, stride=2)
+        self.layer4 = self._make_layer(output_dim, stride=2)
+
+        self.conv2 = nn.Conv2d(
+            output_dim * 3 + output_dim // 4,
+            output_dim * 2,
+            kernel_size=3,
+            padding=1,
+            padding_mode="zeros",
+        )
+        self.relu2 = nn.ReLU(inplace=True)
+        self.conv3 = nn.Conv2d(output_dim * 2, output_dim, kernel_size=1)
+        for m in self.modules():
+            if isinstance(m, nn.Conv2d):
+                nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
+            elif isinstance(m, (nn.InstanceNorm2d)):
+                if m.weight is not None:
+                    nn.init.constant_(m.weight, 1)
+                if m.bias is not None:
+                    nn.init.constant_(m.bias, 0)
+
+    def _make_layer(self, dim, stride=1):
+        layer1 = ResidualBlock(self.in_planes, dim, self.norm_fn, stride=stride)
+        layer2 = ResidualBlock(dim, dim, self.norm_fn, stride=1)
+        layers = (layer1, layer2)
+
+        self.in_planes = dim
+        return nn.Sequential(*layers)
+
+    def forward(self, x):
+        _, _, H, W = x.shape
+
+        x = self.conv1(x)
+        x = self.norm1(x)
+        x = self.relu1(x)
+
+        # Four residual stages (strides 1, 2, 2, 2)
+        a = self.layer1(x)
+        b = self.layer2(a)
+        c = self.layer3(b)
+        d = self.layer4(c)
+
+        def _bilinear_intepolate(x):
+            return F.interpolate(
+                x,
+                (H // self.stride, W // self.stride),
+                mode="bilinear",
+                align_corners=True,
+            )
+
+        a = _bilinear_intepolate(a)
+        b = _bilinear_intepolate(b)
+        c = _bilinear_intepolate(c)
+        d = _bilinear_intepolate(d)
+
+        x = self.conv2(torch.cat([a, b, c, d], dim=1))
+        x = self.norm2(x)
+        x = self.relu2(x)
+        x = self.conv3(x)
+        return x
+
+
+class CorrBlock:
+    def __init__(
+        self,
+        fmaps,
+        num_levels=4,
+        radius=4,
+        multiple_track_feats=False,
+        padding_mode="zeros",
+    ):
+        B, S, C, H, W = fmaps.shape
+        self.S, self.C, self.H, self.W = S, C, H, W
+        self.padding_mode = padding_mode
+        self.num_levels = num_levels
+        self.radius = radius
+        self.fmaps_pyramid = []
+        self.multiple_track_feats = multiple_track_feats
+
+        self.fmaps_pyramid.append(fmaps)
+        for i in range(self.num_levels - 1):
+            fmaps_ = fmaps.reshape(B * S, C, H, W)
+            fmaps_ = F.avg_pool2d(fmaps_, 2, stride=2)
+            _, _, H, W = fmaps_.shape
+            fmaps = fmaps_.reshape(B, S, C, H, W)
+            self.fmaps_pyramid.append(fmaps)
+
+    def sample(self, coords):
+        r = self.radius
+        B, S, N, D = coords.shape
+        assert D == 2
+
+        H, W = self.H, self.W
+        out_pyramid = []
+        for i in range(self.num_levels):
+            corrs = self.corrs_pyramid[i]  # B, S, N, H, W
+            *_, H, W = corrs.shape
+
+            dx = torch.linspace(-r, r, 2 * r + 1)
+            dy = torch.linspace(-r, r, 2 * r + 1)
+            delta = torch.stack(torch.meshgrid(dy, dx, indexing="ij"), axis=-1).to(coords.device)
+
+            centroid_lvl = coords.reshape(B * S * N, 1, 1, 2) / 2**i
+            delta_lvl = delta.view(1, 2 * r + 1, 2 * r + 1, 2)
+            coords_lvl = centroid_lvl + delta_lvl
+
+            corrs = bilinear_sampler(
+                corrs.reshape(B * S * N, 1, H, W),
+                coords_lvl,
+                padding_mode=self.padding_mode,
+            )
+            corrs = corrs.view(B, S, N, -1)
+            out_pyramid.append(corrs)
+
+        out = torch.cat(out_pyramid, dim=-1)  # B, S, N, LRR*2
+        out = out.permute(0, 2, 1, 3).contiguous().view(B * N, S, -1).float()
+        return out
+
+    def corr(self, targets):
+        B, S, N, C = targets.shape
+        if self.multiple_track_feats:
+            targets_split = targets.split(C // self.num_levels, dim=-1)
+            B, S, N, C = targets_split[0].shape
+
+        assert C == self.C
+        assert S == self.S
+
+        fmap1 = targets
+
+        self.corrs_pyramid = []
+        for i, fmaps
in enumerate(self.fmaps_pyramid): + *_, H, W = fmaps.shape + fmap2s = fmaps.view(B, S, C, H * W) # B S C H W -> B S C (H W) + if self.multiple_track_feats: + fmap1 = targets_split[i] + corrs = torch.matmul(fmap1, fmap2s) + corrs = corrs.view(B, S, N, H, W) # B S N (H W) -> B S N H W + corrs = corrs / torch.sqrt(torch.tensor(C).float()) + self.corrs_pyramid.append(corrs) + + +class Attention(nn.Module): + def __init__(self, query_dim, context_dim=None, num_heads=8, dim_head=48, qkv_bias=False): + super().__init__() + inner_dim = dim_head * num_heads + context_dim = default(context_dim, query_dim) + self.scale = dim_head**-0.5 + self.heads = num_heads + + self.to_q = nn.Linear(query_dim, inner_dim, bias=qkv_bias) + self.to_kv = nn.Linear(context_dim, inner_dim * 2, bias=qkv_bias) + self.to_out = nn.Linear(inner_dim, query_dim) + + def forward(self, x, context=None, attn_bias=None): + B, N1, C = x.shape + h = self.heads + + q = self.to_q(x).reshape(B, N1, h, C // h).permute(0, 2, 1, 3) + context = default(context, x) + k, v = self.to_kv(context).chunk(2, dim=-1) + + N2 = context.shape[1] + k = k.reshape(B, N2, h, C // h).permute(0, 2, 1, 3) + v = v.reshape(B, N2, h, C // h).permute(0, 2, 1, 3) + + sim = (q @ k.transpose(-2, -1)) * self.scale + + if attn_bias is not None: + sim = sim + attn_bias + attn = sim.softmax(dim=-1) + + x = (attn @ v).transpose(1, 2).reshape(B, N1, C) + return self.to_out(x) + + +class AttnBlock(nn.Module): + def __init__( + self, + hidden_size, + num_heads, + attn_class: Callable[..., nn.Module] = Attention, + mlp_ratio=4.0, + **block_kwargs + ): + super().__init__() + self.norm1 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6) + self.attn = attn_class(hidden_size, num_heads=num_heads, qkv_bias=True, **block_kwargs) + + self.norm2 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6) + mlp_hidden_dim = int(hidden_size * mlp_ratio) + approx_gelu = lambda: nn.GELU(approximate="tanh") + self.mlp = Mlp( + in_features=hidden_size, + hidden_features=mlp_hidden_dim, + act_layer=approx_gelu, + drop=0, + ) + + def forward(self, x, mask=None): + attn_bias = mask + if mask is not None: + mask = ( + (mask[:, None] * mask[:, :, None]) + .unsqueeze(1) + .expand(-1, self.attn.num_heads, -1, -1) + ) + max_neg_value = -torch.finfo(x.dtype).max + attn_bias = (~mask) * max_neg_value + x = x + self.attn(self.norm1(x), attn_bias=attn_bias) + x = x + self.mlp(self.norm2(x)) + return x diff --git a/cotracker/models/core/cotracker/cotracker.py b/cotracker/models/core/cotracker/cotracker.py index 53178fb..41422ca 100644 --- a/cotracker/models/core/cotracker/cotracker.py +++ b/cotracker/models/core/cotracker/cotracker.py @@ -1,503 +1,519 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
- -import torch -import torch.nn as nn -import torch.nn.functional as F - -from cotracker.models.core.model_utils import sample_features4d, sample_features5d -from cotracker.models.core.embeddings import ( - get_2d_embedding, - get_1d_sincos_pos_embed_from_grid, - get_2d_sincos_pos_embed, -) - -from cotracker.models.core.cotracker.blocks import ( - Mlp, - BasicEncoder, - AttnBlock, - CorrBlock, - Attention, -) - -torch.manual_seed(0) - - -class CoTracker2(nn.Module): - def __init__( - self, - window_len=8, - stride=4, - add_space_attn=True, - num_virtual_tracks=64, - model_resolution=(384, 512), - ): - super(CoTracker2, self).__init__() - self.window_len = window_len - self.stride = stride - self.hidden_dim = 256 - self.latent_dim = 128 - self.add_space_attn = add_space_attn - self.fnet = BasicEncoder(output_dim=self.latent_dim) - self.num_virtual_tracks = num_virtual_tracks - self.model_resolution = model_resolution - self.input_dim = 456 - self.updateformer = EfficientUpdateFormer( - space_depth=6, - time_depth=6, - input_dim=self.input_dim, - hidden_size=384, - output_dim=self.latent_dim + 2, - mlp_ratio=4.0, - add_space_attn=add_space_attn, - num_virtual_tracks=num_virtual_tracks, - ) - - time_grid = torch.linspace(0, window_len - 1, window_len).reshape(1, window_len, 1) - - self.register_buffer( - "time_emb", get_1d_sincos_pos_embed_from_grid(self.input_dim, time_grid[0]) - ) - - self.register_buffer( - "pos_emb", - get_2d_sincos_pos_embed( - embed_dim=self.input_dim, - grid_size=( - model_resolution[0] // stride, - model_resolution[1] // stride, - ), - ), - ) - self.norm = nn.GroupNorm(1, self.latent_dim) - self.track_feat_updater = nn.Sequential( - nn.Linear(self.latent_dim, self.latent_dim), - nn.GELU(), - ) - self.vis_predictor = nn.Sequential( - nn.Linear(self.latent_dim, 1), - ) - - def forward_window( - self, - fmaps, - coords, - track_feat=None, - vis=None, - track_mask=None, - attention_mask=None, - iters=4, - ): - # B = batch size - # S = number of frames in the window) - # N = number of tracks - # C = channels of a point feature vector - # E = positional embedding size - # LRR = local receptive field radius - # D = dimension of the transformer input tokens - - # track_feat = B S N C - # vis = B S N 1 - # track_mask = B S N 1 - # attention_mask = B S N - - B, S_init, N, __ = track_mask.shape - B, S, *_ = fmaps.shape - - track_mask = F.pad(track_mask, (0, 0, 0, 0, 0, S - S_init), "constant") - track_mask_vis = ( - torch.cat([track_mask, vis], dim=-1).permute(0, 2, 1, 3).reshape(B * N, S, 2) - ) - - corr_block = CorrBlock( - fmaps, - num_levels=4, - radius=3, - padding_mode="border", - ) - - sampled_pos_emb = ( - sample_features4d(self.pos_emb.repeat(B, 1, 1, 1), coords[:, 0]) - .reshape(B * N, self.input_dim) - .unsqueeze(1) - ) # B E N -> (B N) 1 E - - coord_preds = [] - for __ in range(iters): - coords = coords.detach() # B S N 2 - corr_block.corr(track_feat) - - # Sample correlation features around each point - fcorrs = corr_block.sample(coords) # (B N) S LRR - - # Get the flow embeddings - flows = (coords - coords[:, 0:1]).permute(0, 2, 1, 3).reshape(B * N, S, 2) - flow_emb = get_2d_embedding(flows, 64, cat_coords=True) # N S E - - track_feat_ = track_feat.permute(0, 2, 1, 3).reshape(B * N, S, self.latent_dim) - - transformer_input = torch.cat([flow_emb, fcorrs, track_feat_, track_mask_vis], dim=2) - x = transformer_input + sampled_pos_emb + self.time_emb - x = x.view(B, N, S, -1) # (B N) S D -> B N S D - - delta = self.updateformer( - x, - attention_mask.reshape(B * S, 
N), # B S N -> (B S) N - ) - - delta_coords = delta[..., :2].permute(0, 2, 1, 3) - coords = coords + delta_coords - coord_preds.append(coords * self.stride) - - delta_feats_ = delta[..., 2:].reshape(B * N * S, self.latent_dim) - track_feat_ = track_feat.permute(0, 2, 1, 3).reshape(B * N * S, self.latent_dim) - track_feat_ = self.track_feat_updater(self.norm(delta_feats_)) + track_feat_ - track_feat = track_feat_.reshape(B, N, S, self.latent_dim).permute( - 0, 2, 1, 3 - ) # (B N S) C -> B S N C - - vis_pred = self.vis_predictor(track_feat).reshape(B, S, N) - return coord_preds, vis_pred - - def get_track_feat(self, fmaps, queried_frames, queried_coords): - sample_frames = queried_frames[:, None, :, None] - sample_coords = torch.cat( - [ - sample_frames, - queried_coords[:, None], - ], - dim=-1, - ) - sample_track_feats = sample_features5d(fmaps, sample_coords) - return sample_track_feats - - def init_video_online_processing(self): - self.online_ind = 0 - self.online_track_feat = None - self.online_coords_predicted = None - self.online_vis_predicted = None - - def forward(self, video, queries, iters=4, is_train=False, is_online=False): - """Predict tracks - - Args: - video (FloatTensor[B, T, 3]): input videos. - queries (FloatTensor[B, N, 3]): point queries. - iters (int, optional): number of updates. Defaults to 4. - is_train (bool, optional): enables training mode. Defaults to False. - is_online (bool, optional): enables online mode. Defaults to False. Before enabling, call model.init_video_online_processing(). - - Returns: - - coords_predicted (FloatTensor[B, T, N, 2]): - - vis_predicted (FloatTensor[B, T, N]): - - train_data: `None` if `is_train` is false, otherwise: - - all_vis_predictions (List[FloatTensor[B, S, N, 1]]): - - all_coords_predictions (List[FloatTensor[B, S, N, 2]]): - - mask (BoolTensor[B, T, N]): - """ - B, T, C, H, W = video.shape - B, N, __ = queries.shape - S = self.window_len - device = queries.device - - # B = batch size - # S = number of frames in the window of the padded video - # S_trimmed = actual number of frames in the window - # N = number of tracks - # C = color channels (3 for RGB) - # E = positional embedding size - # LRR = local receptive field radius - # D = dimension of the transformer input tokens - - # video = B T C H W - # queries = B N 3 - # coords_init = B S N 2 - # vis_init = B S N 1 - - assert S >= 2 # A tracker needs at least two frames to track something - if is_online: - assert T <= S, "Online mode: video chunk must be <= window size." - assert self.online_ind is not None, "Call model.init_video_online_processing() first." - assert not is_train, "Training not supported in online mode." 
- step = S // 2 # How much the sliding window moves at every step - video = 2 * (video / 255.0) - 1.0 - - # The first channel is the frame number - # The rest are the coordinates of points we want to track - queried_frames = queries[:, :, 0].long() - - queried_coords = queries[..., 1:] - queried_coords = queried_coords / self.stride - - # We store our predictions here - coords_predicted = torch.zeros((B, T, N, 2), device=device) - vis_predicted = torch.zeros((B, T, N), device=device) - if is_online: - if self.online_coords_predicted is None: - # Init online predictions with zeros - self.online_coords_predicted = coords_predicted - self.online_vis_predicted = vis_predicted - else: - # Pad online predictions with zeros for the current window - pad = min(step, T - step) - coords_predicted = F.pad( - self.online_coords_predicted, (0, 0, 0, 0, 0, pad), "constant" - ) - vis_predicted = F.pad(self.online_vis_predicted, (0, 0, 0, pad), "constant") - all_coords_predictions, all_vis_predictions = [], [] - - # Pad the video so that an integer number of sliding windows fit into it - # TODO: we may drop this requirement because the transformer should not care - # TODO: pad the features instead of the video - pad = S - T if is_online else (S - T % S) % S # We don't want to pad if T % S == 0 - video = F.pad(video.reshape(B, 1, T, C * H * W), (0, 0, 0, pad), "replicate").reshape( - B, -1, C, H, W - ) - - # Compute convolutional features for the video or for the current chunk in case of online mode - fmaps = self.fnet(video.reshape(-1, C, H, W)).reshape( - B, -1, self.latent_dim, H // self.stride, W // self.stride - ) - - # We compute track features - track_feat = self.get_track_feat( - fmaps, - queried_frames - self.online_ind if is_online else queried_frames, - queried_coords, - ).repeat(1, S, 1, 1) - if is_online: - # We update track features for the current window - sample_frames = queried_frames[:, None, :, None] # B 1 N 1 - left = 0 if self.online_ind == 0 else self.online_ind + step - right = self.online_ind + S - sample_mask = (sample_frames >= left) & (sample_frames < right) - if self.online_track_feat is None: - self.online_track_feat = torch.zeros_like(track_feat, device=device) - self.online_track_feat += track_feat * sample_mask - track_feat = self.online_track_feat.clone() - # We process ((num_windows - 1) * step + S) frames in total, so there are - # (ceil((T - S) / step) + 1) windows - num_windows = (T - S + step - 1) // step + 1 - # We process only the current video chunk in the online mode - indices = [self.online_ind] if is_online else range(0, step * num_windows, step) - - coords_init = queried_coords.reshape(B, 1, N, 2).expand(B, S, N, 2).float() - vis_init = torch.ones((B, S, N, 1), device=device).float() * 10 - for ind in indices: - # We copy over coords and vis for tracks that are queried - # by the end of the previous window, which is ind + overlap - if ind > 0: - overlap = S - step - copy_over = (queried_frames < ind + overlap)[:, None, :, None] # B 1 N 1 - coords_prev = torch.nn.functional.pad( - coords_predicted[:, ind : ind + overlap] / self.stride, - (0, 0, 0, 0, 0, step), - "replicate", - ) # B S N 2 - vis_prev = torch.nn.functional.pad( - vis_predicted[:, ind : ind + overlap, :, None].clone(), - (0, 0, 0, 0, 0, step), - "replicate", - ) # B S N 1 - coords_init = torch.where( - copy_over.expand_as(coords_init), coords_prev, coords_init - ) - vis_init = torch.where(copy_over.expand_as(vis_init), vis_prev, vis_init) - - # The attention mask is 1 for the spatio-temporal points 
within - # a track which is updated in the current window - attention_mask = (queried_frames < ind + S).reshape(B, 1, N).repeat(1, S, 1) # B S N - - # The track mask is 1 for the spatio-temporal points that actually - # need updating: only after begin queried, and not if contained - # in a previous window - track_mask = ( - queried_frames[:, None, :, None] - <= torch.arange(ind, ind + S, device=device)[None, :, None, None] - ).contiguous() # B S N 1 - - if ind > 0: - track_mask[:, :overlap, :, :] = False - - # Predict the coordinates and visibility for the current window - coords, vis = self.forward_window( - fmaps=fmaps if is_online else fmaps[:, ind : ind + S], - coords=coords_init, - track_feat=attention_mask.unsqueeze(-1) * track_feat, - vis=vis_init, - track_mask=track_mask, - attention_mask=attention_mask, - iters=iters, - ) - - S_trimmed = T if is_online else min(T - ind, S) # accounts for last window duration - coords_predicted[:, ind : ind + S] = coords[-1][:, :S_trimmed] - vis_predicted[:, ind : ind + S] = vis[:, :S_trimmed] - if is_train: - all_coords_predictions.append([coord[:, :S_trimmed] for coord in coords]) - all_vis_predictions.append(torch.sigmoid(vis[:, :S_trimmed])) - - if is_online: - self.online_ind += step - self.online_coords_predicted = coords_predicted - self.online_vis_predicted = vis_predicted - vis_predicted = torch.sigmoid(vis_predicted) - - if is_train: - mask = queried_frames[:, None] <= torch.arange(0, T, device=device)[None, :, None] - train_data = (all_coords_predictions, all_vis_predictions, mask) - else: - train_data = None - - return coords_predicted, vis_predicted, train_data - - -class EfficientUpdateFormer(nn.Module): - """ - Transformer model that updates track estimates. - """ - - def __init__( - self, - space_depth=6, - time_depth=6, - input_dim=320, - hidden_size=384, - num_heads=8, - output_dim=130, - mlp_ratio=4.0, - add_space_attn=True, - num_virtual_tracks=64, - ): - super().__init__() - self.out_channels = 2 - self.num_heads = num_heads - self.hidden_size = hidden_size - self.add_space_attn = add_space_attn - self.input_transform = torch.nn.Linear(input_dim, hidden_size, bias=True) - self.flow_head = torch.nn.Linear(hidden_size, output_dim, bias=True) - self.num_virtual_tracks = num_virtual_tracks - self.virual_tracks = nn.Parameter(torch.randn(1, num_virtual_tracks, 1, hidden_size)) - self.time_blocks = nn.ModuleList( - [ - AttnBlock( - hidden_size, - num_heads, - mlp_ratio=mlp_ratio, - attn_class=Attention, - ) - for _ in range(time_depth) - ] - ) - - if add_space_attn: - self.space_virtual_blocks = nn.ModuleList( - [ - AttnBlock( - hidden_size, - num_heads, - mlp_ratio=mlp_ratio, - attn_class=Attention, - ) - for _ in range(space_depth) - ] - ) - self.space_point2virtual_blocks = nn.ModuleList( - [ - CrossAttnBlock(hidden_size, hidden_size, num_heads, mlp_ratio=mlp_ratio) - for _ in range(space_depth) - ] - ) - self.space_virtual2point_blocks = nn.ModuleList( - [ - CrossAttnBlock(hidden_size, hidden_size, num_heads, mlp_ratio=mlp_ratio) - for _ in range(space_depth) - ] - ) - assert len(self.time_blocks) >= len(self.space_virtual2point_blocks) - self.initialize_weights() - - def initialize_weights(self): - def _basic_init(module): - if isinstance(module, nn.Linear): - torch.nn.init.xavier_uniform_(module.weight) - if module.bias is not None: - nn.init.constant_(module.bias, 0) - - self.apply(_basic_init) - - def forward(self, input_tensor, mask=None): - tokens = self.input_transform(input_tensor) - B, _, T, _ = tokens.shape - 
virtual_tokens = self.virual_tracks.repeat(B, 1, T, 1) - tokens = torch.cat([tokens, virtual_tokens], dim=1) - _, N, _, _ = tokens.shape - - j = 0 - for i in range(len(self.time_blocks)): - time_tokens = tokens.contiguous().view(B * N, T, -1) # B N T C -> (B N) T C - time_tokens = self.time_blocks[i](time_tokens) - - tokens = time_tokens.view(B, N, T, -1) # (B N) T C -> B N T C - if self.add_space_attn and ( - i % (len(self.time_blocks) // len(self.space_virtual_blocks)) == 0 - ): - space_tokens = ( - tokens.permute(0, 2, 1, 3).contiguous().view(B * T, N, -1) - ) # B N T C -> (B T) N C - point_tokens = space_tokens[:, : N - self.num_virtual_tracks] - virtual_tokens = space_tokens[:, N - self.num_virtual_tracks :] - - virtual_tokens = self.space_virtual2point_blocks[j]( - virtual_tokens, point_tokens, mask=mask - ) - virtual_tokens = self.space_virtual_blocks[j](virtual_tokens) - point_tokens = self.space_point2virtual_blocks[j]( - point_tokens, virtual_tokens, mask=mask - ) - space_tokens = torch.cat([point_tokens, virtual_tokens], dim=1) - tokens = space_tokens.view(B, T, N, -1).permute(0, 2, 1, 3) # (B T) N C -> B N T C - j += 1 - tokens = tokens[:, : N - self.num_virtual_tracks] - flow = self.flow_head(tokens) - return flow - - -class CrossAttnBlock(nn.Module): - def __init__(self, hidden_size, context_dim, num_heads=1, mlp_ratio=4.0, **block_kwargs): - super().__init__() - self.norm1 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6) - self.norm_context = nn.LayerNorm(hidden_size) - self.cross_attn = Attention( - hidden_size, context_dim=context_dim, num_heads=num_heads, qkv_bias=True, **block_kwargs - ) - - self.norm2 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6) - mlp_hidden_dim = int(hidden_size * mlp_ratio) - approx_gelu = lambda: nn.GELU(approximate="tanh") - self.mlp = Mlp( - in_features=hidden_size, - hidden_features=mlp_hidden_dim, - act_layer=approx_gelu, - drop=0, - ) - - def forward(self, x, context, mask=None): - if mask is not None: - if mask.shape[1] == x.shape[1]: - mask = mask[:, None, :, None].expand( - -1, self.cross_attn.heads, -1, context.shape[1] - ) - else: - mask = mask[:, None, None].expand(-1, self.cross_attn.heads, x.shape[1], -1) - - max_neg_value = -torch.finfo(x.dtype).max - attn_bias = (~mask) * max_neg_value - x = x + self.cross_attn( - self.norm1(x), context=self.norm_context(context), attn_bias=attn_bias - ) - x = x + self.mlp(self.norm2(x)) - return x +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
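+
+# CoTracker2 tracks a set of query points (t, x, y) through a video: a
+# convolutional encoder extracts per-frame features, and a transformer
+# iteratively refines the point coordinates and visibilities over
+# overlapping sliding temporal windows (see CoTracker2.forward below).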
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+from cotracker.models.core.model_utils import sample_features4d, sample_features5d
+from cotracker.models.core.embeddings import (
+    get_2d_embedding,
+    get_1d_sincos_pos_embed_from_grid,
+    get_2d_sincos_pos_embed,
+)
+
+from cotracker.models.core.cotracker.blocks import (
+    Mlp,
+    BasicEncoder,
+    AttnBlock,
+    CorrBlock,
+    Attention,
+)
+
+torch.manual_seed(0)
+
+
+class CoTracker2(nn.Module):
+    def __init__(
+        self,
+        window_len=8,
+        stride=4,
+        add_space_attn=True,
+        num_virtual_tracks=64,
+        model_resolution=(384, 512),
+    ):
+        super(CoTracker2, self).__init__()
+        self.window_len = window_len
+        self.stride = stride
+        self.hidden_dim = 256
+        self.latent_dim = 128
+        self.add_space_attn = add_space_attn
+
+        self.fnet = BasicEncoder(output_dim=self.latent_dim)
+        self.num_virtual_tracks = num_virtual_tracks
+        self.model_resolution = model_resolution
+        self.input_dim = 456
+        self.updateformer = EfficientUpdateFormer(
+            space_depth=6,
+            time_depth=6,
+            input_dim=self.input_dim,
+            hidden_size=384,
+            output_dim=self.latent_dim + 2,
+            mlp_ratio=4.0,
+            add_space_attn=add_space_attn,
+            num_virtual_tracks=num_virtual_tracks,
+        )
+
+        time_grid = torch.linspace(0, window_len - 1, window_len).reshape(1, window_len, 1)
+
+        self.register_buffer(
+            "time_emb", get_1d_sincos_pos_embed_from_grid(self.input_dim, time_grid[0])
+        )
+
+        self.register_buffer(
+            "pos_emb",
+            get_2d_sincos_pos_embed(
+                embed_dim=self.input_dim,
+                grid_size=(
+                    model_resolution[0] // stride,
+                    model_resolution[1] // stride,
+                ),
+            ),
+        )
+        self.norm = nn.GroupNorm(1, self.latent_dim)
+        self.track_feat_updater = nn.Sequential(
+            nn.Linear(self.latent_dim, self.latent_dim),
+            nn.GELU(),
+        )
+        self.vis_predictor = nn.Sequential(
+            nn.Linear(self.latent_dim, 1),
+        )
+
+    def forward_window(
+        self,
+        fmaps,
+        coords,
+        track_feat=None,
+        vis=None,
+        track_mask=None,
+        attention_mask=None,
+        iters=4,
+    ):
+        # B = batch size
+        # S = number of frames in the window
+        # N = number of tracks
+        # C = channels of a point feature vector
+        # E = positional embedding size
+        # LRR = local receptive field radius
+        # D = dimension of the transformer input tokens
+
+        # track_feat = B S N C
+        # vis = B S N 1
+        # track_mask = B S N 1
+        # attention_mask = B S N
+
+        B, S_init, N, __ = track_mask.shape
+        B, S, *_ = fmaps.shape
+
+        # Pad track_mask so that its number of frames matches the feature maps.
+        track_mask = F.pad(track_mask, (0, 0, 0, 0, 0, S - S_init), "constant")
+        track_mask_vis = (
+            torch.cat([track_mask, vis], dim=-1).permute(0, 2, 1, 3).reshape(B * N, S, 2)
+        )
+
+        corr_block = CorrBlock(
+            fmaps,
+            num_levels=4,
+            radius=3,
+            padding_mode="border",
+        )
+
+        sampled_pos_emb = (
+            sample_features4d(self.pos_emb.repeat(B, 1, 1, 1), coords[:, 0])
+            .reshape(B * N, self.input_dim)
+            .unsqueeze(1)
+        )  # B E N -> (B N) 1 E
+
+        coord_preds = []
+        for __ in range(iters):
+            coords = coords.detach()  # B S N 2
+            corr_block.corr(track_feat)
+
+            # Sample correlation features around each point
+            fcorrs = corr_block.sample(coords)  # (B N) S LRR
+
+            # Get the flow embeddings
+            flows = (coords - coords[:, 0:1]).permute(0, 2, 1, 3).reshape(B * N, S, 2)
+            flow_emb = get_2d_embedding(flows, 64, cat_coords=True)  # N S E
+
+            track_feat_ = track_feat.permute(0, 2, 1, 3).reshape(B * N, S, self.latent_dim)
+
+            transformer_input = torch.cat([flow_emb, fcorrs, track_feat_, track_mask_vis], dim=2)
+            x = transformer_input + sampled_pos_emb + self.time_emb
+            x = x.view(B, N, S, -1)  # (B N) S D -> B N S D
+
+            delta = self.updateformer(
+                x,
+                attention_mask.reshape(B * S, N),  # B S N -> (B S) N
+            )
+
+            delta_coords = delta[..., :2].permute(0, 2, 1, 3)
+            coords = coords + delta_coords
+            coord_preds.append(coords * self.stride)
+
+            delta_feats_ = delta[..., 2:].reshape(B * N * S, self.latent_dim)
+            track_feat_ = track_feat.permute(0, 2, 1, 3).reshape(B * N * S, self.latent_dim)
+            track_feat_ = self.track_feat_updater(self.norm(delta_feats_)) + track_feat_
+            track_feat = track_feat_.reshape(B, N, S, self.latent_dim).permute(
+                0, 2, 1, 3
+            )  # (B N S) C -> B S N C
+
+        vis_pred = self.vis_predictor(track_feat).reshape(B, S, N)
+        return coord_preds, vis_pred
+
+    def get_track_feat(self, fmaps, queried_frames, queried_coords):
+        sample_frames = queried_frames[:, None, :, None]
+        sample_coords = torch.cat(
+            [
+                sample_frames,
+                queried_coords[:, None],
+            ],
+            dim=-1,
+        )
+        # bilinear sampling of the feature maps at the query locations
+        sample_track_feats = sample_features5d(fmaps, sample_coords)
+        return sample_track_feats
+
+    def init_video_online_processing(self):
+        self.online_ind = 0
+        self.online_track_feat = None
+        self.online_coords_predicted = None
+        self.online_vis_predicted = None
+
+    def forward(self, video, queries, iters=4, is_train=False, is_online=False):
+        """Predict tracks
+
+        Args:
+            video (FloatTensor[B, T, 3, H, W]): input videos.
+            queries (FloatTensor[B, N, 3]): point queries.
+            iters (int, optional): number of updates. Defaults to 4.
+            is_train (bool, optional): enables training mode. Defaults to False.
+            is_online (bool, optional): enables online mode. Defaults to False. Before enabling, call model.init_video_online_processing().
+
+        Returns:
+            - coords_predicted (FloatTensor[B, T, N, 2]):
+            - vis_predicted (FloatTensor[B, T, N]):
+            - train_data: `None` if `is_train` is false, otherwise:
+                - all_vis_predictions (List[FloatTensor[B, S, N, 1]]):
+                - all_coords_predictions (List[FloatTensor[B, S, N, 2]]):
+                - mask (BoolTensor[B, T, N]):
+        """
+        B, T, C, H, W = video.shape
+        B, N, __ = queries.shape
+        S = self.window_len
+        device = queries.device
+
+        # B = batch size
+        # S = number of frames in the window of the padded video
+        # S_trimmed = actual number of frames in the window
+        # N = number of tracks
+        # C = color channels (3 for RGB)
+        # E = positional embedding size
+        # LRR = local receptive field radius
+        # D = dimension of the transformer input tokens
+
+        # video = B T C H W
+        # queries = B N 3
+        # coords_init = B S N 2
+        # vis_init = B S N 1
+
+        assert S >= 2  # A tracker needs at least two frames to track something
+        if is_online:
+            assert T <= S, "Online mode: video chunk must be <= window size."
+            assert self.online_ind is not None, "Call model.init_video_online_processing() first."
+            assert not is_train, "Training not supported in online mode."
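+
+        # Sliding-window scheme: the (padded) video is processed in windows of
+        # S frames that advance by step = S // 2 frames, so consecutive windows
+        # overlap; predictions from the end of one window are used to
+        # initialise the coordinates and visibilities of the next one.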
+        step = S // 2  # How much the sliding window moves at every step
+        video = 2 * (video / 255.0) - 1.0
+
+        # The first channel is the frame number
+        # The rest are the coordinates of points we want to track
+        queried_frames = queries[:, :, 0].long()  # frame index of each query
+
+        queried_coords = queries[..., 1:]
+        queried_coords = queried_coords / self.stride  # scale to the feature-map resolution
+
+        # We store our predictions here
+        coords_predicted = torch.zeros((B, T, N, 2), device=device)  # predicted points, filled in window by window
+        vis_predicted = torch.zeros((B, T, N), device=device)
+        if is_online:
+            # On the first online call, initialise the online buffers with zeros;
+            # on later calls, pad the stored predictions with zeros for the new window
+            if self.online_coords_predicted is None:
+                # Init online predictions with zeros
+                self.online_coords_predicted = coords_predicted
+                self.online_vis_predicted = vis_predicted
+            else:
+                # Pad online predictions with zeros for the current window
+                pad = min(step, T - step)  # the padding must not exceed the remaining frames
+                coords_predicted = F.pad(
+                    self.online_coords_predicted, (0, 0, 0, 0, 0, pad), "constant"
+                )
+                vis_predicted = F.pad(self.online_vis_predicted, (0, 0, 0, pad), "constant")
+        all_coords_predictions, all_vis_predictions = [], []
+
+        # Pad the video so that an integer number of sliding windows fit into it
+        # TODO: we may drop this requirement because the transformer should not care
+        # TODO: pad the features instead of the video
+        # the next line computes how many frames need to be padded
+        pad = S - T if is_online else (S - T % S) % S  # We don't want to pad if T % S == 0
+        # pad by replicating the last frame `pad` times
+        video = F.pad(video.reshape(B, 1, T, C * H * W), (0, 0, 0, pad), "replicate").reshape(
+            B, -1, C, H, W
+        )
+
+        # Compute convolutional features for the video or for the current chunk in case of online mode
+        fmaps = self.fnet(video.reshape(-1, C, H, W)).reshape(
+            B, -1, self.latent_dim, H // self.stride, W // self.stride
+        )
+
+        # We compute track features
+        # get_track_feat samples the feature maps with bilinear interpolation
+        track_feat = self.get_track_feat(
+            fmaps,
+            queried_frames - self.online_ind if is_online else queried_frames,
+            queried_coords,
+        ).repeat(1, S, 1, 1)
+        if is_online:
+            # We update track features for the current window
+            sample_frames = queried_frames[:, None, :, None]  # B 1 N 1
+            left = 0 if self.online_ind == 0 else self.online_ind + step
+            right = self.online_ind + S
+            sample_mask = (sample_frames >= left) & (sample_frames < right)
+            if self.online_track_feat is None:
+                self.online_track_feat = torch.zeros_like(track_feat, device=device)
+            self.online_track_feat += track_feat * sample_mask
+            track_feat = self.online_track_feat.clone()
+        # We process ((num_windows - 1) * step + S) frames in total, so there are
+        # (ceil((T - S) / step) + 1) windows
+        num_windows = (T - S + step - 1) // step + 1
+        # We process only the current video chunk in the online mode
+        indices = [self.online_ind] if is_online else range(0, step * num_windows, step)
+
+        # broadcast the queried coordinates to shape B S N 2
+        coords_init = queried_coords.reshape(B, 1, N, 2).expand(B, S, N, 2).float()
+        vis_init = torch.ones((B, S, N, 1), device=device).float() * 10
+        for ind in indices:
+            # We copy over coords and vis for tracks that are queried
+            # by the end of the previous window, which is ind + overlap
+            # handle the overlap with the previous window
+            if ind > 0:
+                overlap = S - step
+                copy_over = (queried_frames < ind + overlap)[:, None, :, None]  # B 1 N 1
+                # copy the predictions of the previous window
+                coords_prev = torch.nn.functional.pad(
+                    coords_predicted[:, ind : ind + overlap] / self.stride,
+                    (0, 0, 0, 0, 0, step),
+                    "replicate",
+                )  # B S N 2
+                vis_prev = torch.nn.functional.pad(
+                    vis_predicted[:, ind : ind + overlap, :, None].clone(),
+                    (0, 0, 0, 0, 0, step),
+                    "replicate",
+                )  # B S N 1
+                coords_init = torch.where(
+                    copy_over.expand_as(coords_init), coords_prev, coords_init
+                )  # where copy_over is True take coords_prev, otherwise keep coords_init
+                vis_init = torch.where(copy_over.expand_as(vis_init), vis_prev, vis_init)
+
+            # The attention mask is 1 for the spatio-temporal points within
+            # a track which is updated in the current window
+            # i.e. the spatio-temporal points that are updated in the current window
+            attention_mask = (queried_frames < ind + S).reshape(B, 1, N).repeat(1, S, 1)  # B S N
+
+            # The track mask is 1 for the spatio-temporal points that actually
+            # need updating: only after being queried, and not if contained
+            # in a previous window
+            # track_mask marks the points that actually need an update
+            track_mask = (
+                queried_frames[:, None, :, None]
+                <= torch.arange(ind, ind + S, device=device)[None, :, None, None]
+            ).contiguous()  # B S N 1
+
+            if ind > 0:
+                track_mask[:, :overlap, :, :] = False
+
+            # Predict the coordinates and visibility for the current window
+            # forward_window refines coords and vis for this window
+            coords, vis = self.forward_window(
+                fmaps=fmaps if is_online else fmaps[:, ind : ind + S],
+                coords=coords_init,
+                track_feat=attention_mask.unsqueeze(-1) * track_feat,
+                vis=vis_init,
+                track_mask=track_mask,
+                attention_mask=attention_mask,
+                iters=iters,
+            )
+
+            S_trimmed = T if is_online else min(T - ind, S)  # accounts for last window duration
+            coords_predicted[:, ind : ind + S] = coords[-1][:, :S_trimmed]
+            vis_predicted[:, ind : ind + S] = vis[:, :S_trimmed]
+            if is_train:
+                all_coords_predictions.append([coord[:, :S_trimmed] for coord in coords])
+                all_vis_predictions.append(torch.sigmoid(vis[:, :S_trimmed]))
+
+        if is_online:
+            self.online_ind += step
+            self.online_coords_predicted = coords_predicted
+            self.online_vis_predicted = vis_predicted
+        vis_predicted = torch.sigmoid(vis_predicted)
+
+        if is_train:
+            mask = queried_frames[:, None] <= torch.arange(0, T, device=device)[None, :, None]
+            train_data = (all_coords_predictions, all_vis_predictions, mask)
+        else:
+            train_data = None
+
+        return coords_predicted, vis_predicted, train_data
+
+
+class EfficientUpdateFormer(nn.Module):
+    """
+    Transformer model that updates track estimates.
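+
+    Tokens are first mixed along the time dimension; when space attention is
+    enabled, a small set of learned virtual-track tokens exchanges information
+    with the point tokens through cross-attention, which keeps the cost of the
+    spatial attention roughly linear in the number of tracked points.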
+ """ + + def __init__( + self, + space_depth=6, + time_depth=6, + input_dim=320, + hidden_size=384, + num_heads=8, + output_dim=130, + mlp_ratio=4.0, + add_space_attn=True, + num_virtual_tracks=64, + ): + super().__init__() + self.out_channels = 2 + self.num_heads = num_heads + self.hidden_size = hidden_size + self.add_space_attn = add_space_attn + self.input_transform = torch.nn.Linear(input_dim, hidden_size, bias=True) + self.flow_head = torch.nn.Linear(hidden_size, output_dim, bias=True) + self.num_virtual_tracks = num_virtual_tracks + self.virual_tracks = nn.Parameter(torch.randn(1, num_virtual_tracks, 1, hidden_size)) + self.time_blocks = nn.ModuleList( + [ + AttnBlock( + hidden_size, + num_heads, + mlp_ratio=mlp_ratio, + attn_class=Attention, + ) + for _ in range(time_depth) + ] + ) + + if add_space_attn: + self.space_virtual_blocks = nn.ModuleList( + [ + AttnBlock( + hidden_size, + num_heads, + mlp_ratio=mlp_ratio, + attn_class=Attention, + ) + for _ in range(space_depth) + ] + ) + self.space_point2virtual_blocks = nn.ModuleList( + [ + CrossAttnBlock(hidden_size, hidden_size, num_heads, mlp_ratio=mlp_ratio) + for _ in range(space_depth) + ] + ) + self.space_virtual2point_blocks = nn.ModuleList( + [ + CrossAttnBlock(hidden_size, hidden_size, num_heads, mlp_ratio=mlp_ratio) + for _ in range(space_depth) + ] + ) + assert len(self.time_blocks) >= len(self.space_virtual2point_blocks) + self.initialize_weights() + + def initialize_weights(self): + def _basic_init(module): + if isinstance(module, nn.Linear): + torch.nn.init.xavier_uniform_(module.weight) + if module.bias is not None: + nn.init.constant_(module.bias, 0) + + self.apply(_basic_init) + + def forward(self, input_tensor, mask=None): + tokens = self.input_transform(input_tensor) + B, _, T, _ = tokens.shape + virtual_tokens = self.virual_tracks.repeat(B, 1, T, 1) + tokens = torch.cat([tokens, virtual_tokens], dim=1) + _, N, _, _ = tokens.shape + + j = 0 + for i in range(len(self.time_blocks)): + time_tokens = tokens.contiguous().view(B * N, T, -1) # B N T C -> (B N) T C + time_tokens = self.time_blocks[i](time_tokens) + + tokens = time_tokens.view(B, N, T, -1) # (B N) T C -> B N T C + if self.add_space_attn and ( + i % (len(self.time_blocks) // len(self.space_virtual_blocks)) == 0 + ): + space_tokens = ( + tokens.permute(0, 2, 1, 3).contiguous().view(B * T, N, -1) + ) # B N T C -> (B T) N C + point_tokens = space_tokens[:, : N - self.num_virtual_tracks] + virtual_tokens = space_tokens[:, N - self.num_virtual_tracks :] + + virtual_tokens = self.space_virtual2point_blocks[j]( + virtual_tokens, point_tokens, mask=mask + ) + virtual_tokens = self.space_virtual_blocks[j](virtual_tokens) + point_tokens = self.space_point2virtual_blocks[j]( + point_tokens, virtual_tokens, mask=mask + ) + space_tokens = torch.cat([point_tokens, virtual_tokens], dim=1) + tokens = space_tokens.view(B, T, N, -1).permute(0, 2, 1, 3) # (B T) N C -> B N T C + j += 1 + tokens = tokens[:, : N - self.num_virtual_tracks] + flow = self.flow_head(tokens) + return flow + + +class CrossAttnBlock(nn.Module): + def __init__(self, hidden_size, context_dim, num_heads=1, mlp_ratio=4.0, **block_kwargs): + super().__init__() + self.norm1 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6) + self.norm_context = nn.LayerNorm(hidden_size) + self.cross_attn = Attention( + hidden_size, context_dim=context_dim, num_heads=num_heads, qkv_bias=True, **block_kwargs + ) + + self.norm2 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6) + mlp_hidden_dim = 
int(hidden_size * mlp_ratio) + approx_gelu = lambda: nn.GELU(approximate="tanh") + self.mlp = Mlp( + in_features=hidden_size, + hidden_features=mlp_hidden_dim, + act_layer=approx_gelu, + drop=0, + ) + + def forward(self, x, context, mask=None): + if mask is not None: + if mask.shape[1] == x.shape[1]: + mask = mask[:, None, :, None].expand( + -1, self.cross_attn.heads, -1, context.shape[1] + ) + else: + mask = mask[:, None, None].expand(-1, self.cross_attn.heads, x.shape[1], -1) + + max_neg_value = -torch.finfo(x.dtype).max + attn_bias = (~mask) * max_neg_value + x = x + self.cross_attn( + self.norm1(x), context=self.norm_context(context), attn_bias=attn_bias + ) + x = x + self.mlp(self.norm2(x)) + return x diff --git a/cotracker/models/core/cotracker/losses.py b/cotracker/models/core/cotracker/losses.py index 2bdcc2e..0168d9d 100644 --- a/cotracker/models/core/cotracker/losses.py +++ b/cotracker/models/core/cotracker/losses.py @@ -1,61 +1,61 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import torch -import torch.nn.functional as F -from cotracker.models.core.model_utils import reduce_masked_mean - -EPS = 1e-6 - - -def balanced_ce_loss(pred, gt, valid=None): - total_balanced_loss = 0.0 - for j in range(len(gt)): - B, S, N = gt[j].shape - # pred and gt are the same shape - for (a, b) in zip(pred[j].size(), gt[j].size()): - assert a == b # some shape mismatch! - # if valid is not None: - for (a, b) in zip(pred[j].size(), valid[j].size()): - assert a == b # some shape mismatch! - - pos = (gt[j] > 0.95).float() - neg = (gt[j] < 0.05).float() - - label = pos * 2.0 - 1.0 - a = -label * pred[j] - b = F.relu(a) - loss = b + torch.log(torch.exp(-b) + torch.exp(a - b)) - - pos_loss = reduce_masked_mean(loss, pos * valid[j]) - neg_loss = reduce_masked_mean(loss, neg * valid[j]) - - balanced_loss = pos_loss + neg_loss - total_balanced_loss += balanced_loss / float(N) - return total_balanced_loss - - -def sequence_loss(flow_preds, flow_gt, vis, valids, gamma=0.8): - """Loss function defined over sequence of flow predictions""" - total_flow_loss = 0.0 - for j in range(len(flow_gt)): - B, S, N, D = flow_gt[j].shape - assert D == 2 - B, S1, N = vis[j].shape - B, S2, N = valids[j].shape - assert S == S1 - assert S == S2 - n_predictions = len(flow_preds[j]) - flow_loss = 0.0 - for i in range(n_predictions): - i_weight = gamma ** (n_predictions - i - 1) - flow_pred = flow_preds[j][i] - i_loss = (flow_pred - flow_gt[j]).abs() # B, S, N, 2 - i_loss = torch.mean(i_loss, dim=3) # B, S, N - flow_loss += i_weight * reduce_masked_mean(i_loss, valids[j]) - flow_loss = flow_loss / n_predictions - total_flow_loss += flow_loss / float(N) - return total_flow_loss +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import torch +import torch.nn.functional as F +from cotracker.models.core.model_utils import reduce_masked_mean + +EPS = 1e-6 + + +def balanced_ce_loss(pred, gt, valid=None): + total_balanced_loss = 0.0 + for j in range(len(gt)): + B, S, N = gt[j].shape + # pred and gt are the same shape + for (a, b) in zip(pred[j].size(), gt[j].size()): + assert a == b # some shape mismatch! + # if valid is not None: + for (a, b) in zip(pred[j].size(), valid[j].size()): + assert a == b # some shape mismatch! 
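+
+        # The loss below is binary cross-entropy on logits in a numerically
+        # stable form: with a = -label * pred and b = relu(a),
+        # b + log(exp(-b) + exp(a - b)) equals log(1 + exp(-label * pred)).
+        # Masked means over positive and negative points are then summed.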
+ + pos = (gt[j] > 0.95).float() + neg = (gt[j] < 0.05).float() + + label = pos * 2.0 - 1.0 + a = -label * pred[j] + b = F.relu(a) + loss = b + torch.log(torch.exp(-b) + torch.exp(a - b)) + + pos_loss = reduce_masked_mean(loss, pos * valid[j]) + neg_loss = reduce_masked_mean(loss, neg * valid[j]) + + balanced_loss = pos_loss + neg_loss + total_balanced_loss += balanced_loss / float(N) + return total_balanced_loss + + +def sequence_loss(flow_preds, flow_gt, vis, valids, gamma=0.8): + """Loss function defined over sequence of flow predictions""" + total_flow_loss = 0.0 + for j in range(len(flow_gt)): + B, S, N, D = flow_gt[j].shape + assert D == 2 + B, S1, N = vis[j].shape + B, S2, N = valids[j].shape + assert S == S1 + assert S == S2 + n_predictions = len(flow_preds[j]) + flow_loss = 0.0 + for i in range(n_predictions): + i_weight = gamma ** (n_predictions - i - 1) + flow_pred = flow_preds[j][i] + i_loss = (flow_pred - flow_gt[j]).abs() # B, S, N, 2 + i_loss = torch.mean(i_loss, dim=3) # B, S, N + flow_loss += i_weight * reduce_masked_mean(i_loss, valids[j]) + flow_loss = flow_loss / n_predictions + total_flow_loss += flow_loss / float(N) + return total_flow_loss diff --git a/cotracker/models/core/embeddings.py b/cotracker/models/core/embeddings.py index 897cd5d..2ee4aee 100644 --- a/cotracker/models/core/embeddings.py +++ b/cotracker/models/core/embeddings.py @@ -1,120 +1,120 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -from typing import Tuple, Union -import torch - - -def get_2d_sincos_pos_embed( - embed_dim: int, grid_size: Union[int, Tuple[int, int]] -) -> torch.Tensor: - """ - This function initializes a grid and generates a 2D positional embedding using sine and cosine functions. - It is a wrapper of get_2d_sincos_pos_embed_from_grid. - Args: - - embed_dim: The embedding dimension. - - grid_size: The grid size. - Returns: - - pos_embed: The generated 2D positional embedding. - """ - if isinstance(grid_size, tuple): - grid_size_h, grid_size_w = grid_size - else: - grid_size_h = grid_size_w = grid_size - grid_h = torch.arange(grid_size_h, dtype=torch.float) - grid_w = torch.arange(grid_size_w, dtype=torch.float) - grid = torch.meshgrid(grid_w, grid_h, indexing="xy") - grid = torch.stack(grid, dim=0) - grid = grid.reshape([2, 1, grid_size_h, grid_size_w]) - pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid) - return pos_embed.reshape(1, grid_size_h, grid_size_w, -1).permute(0, 3, 1, 2) - - -def get_2d_sincos_pos_embed_from_grid( - embed_dim: int, grid: torch.Tensor -) -> torch.Tensor: - """ - This function generates a 2D positional embedding from a given grid using sine and cosine functions. - - Args: - - embed_dim: The embedding dimension. - - grid: The grid to generate the embedding from. - - Returns: - - emb: The generated 2D positional embedding. - """ - assert embed_dim % 2 == 0 - - # use half of dimensions to encode grid_h - emb_h = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[0]) # (H*W, D/2) - emb_w = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[1]) # (H*W, D/2) - - emb = torch.cat([emb_h, emb_w], dim=2) # (H*W, D) - return emb - - -def get_1d_sincos_pos_embed_from_grid( - embed_dim: int, pos: torch.Tensor -) -> torch.Tensor: - """ - This function generates a 1D positional embedding from a given grid using sine and cosine functions. - - Args: - - embed_dim: The embedding dimension. 
- - pos: The position to generate the embedding from. - - Returns: - - emb: The generated 1D positional embedding. - """ - assert embed_dim % 2 == 0 - omega = torch.arange(embed_dim // 2, dtype=torch.double) - omega /= embed_dim / 2.0 - omega = 1.0 / 10000**omega # (D/2,) - - pos = pos.reshape(-1) # (M,) - out = torch.einsum("m,d->md", pos, omega) # (M, D/2), outer product - - emb_sin = torch.sin(out) # (M, D/2) - emb_cos = torch.cos(out) # (M, D/2) - - emb = torch.cat([emb_sin, emb_cos], dim=1) # (M, D) - return emb[None].float() - - -def get_2d_embedding(xy: torch.Tensor, C: int, cat_coords: bool = True) -> torch.Tensor: - """ - This function generates a 2D positional embedding from given coordinates using sine and cosine functions. - - Args: - - xy: The coordinates to generate the embedding from. - - C: The size of the embedding. - - cat_coords: A flag to indicate whether to concatenate the original coordinates to the embedding. - - Returns: - - pe: The generated 2D positional embedding. - """ - B, N, D = xy.shape - assert D == 2 - - x = xy[:, :, 0:1] - y = xy[:, :, 1:2] - div_term = ( - torch.arange(0, C, 2, device=xy.device, dtype=torch.float32) * (1000.0 / C) - ).reshape(1, 1, int(C / 2)) - - pe_x = torch.zeros(B, N, C, device=xy.device, dtype=torch.float32) - pe_y = torch.zeros(B, N, C, device=xy.device, dtype=torch.float32) - - pe_x[:, :, 0::2] = torch.sin(x * div_term) - pe_x[:, :, 1::2] = torch.cos(x * div_term) - - pe_y[:, :, 0::2] = torch.sin(y * div_term) - pe_y[:, :, 1::2] = torch.cos(y * div_term) - - pe = torch.cat([pe_x, pe_y], dim=2) # (B, N, C*3) - if cat_coords: - pe = torch.cat([xy, pe], dim=2) # (B, N, C*3+3) - return pe +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +from typing import Tuple, Union +import torch + + +def get_2d_sincos_pos_embed( + embed_dim: int, grid_size: Union[int, Tuple[int, int]] +) -> torch.Tensor: + """ + This function initializes a grid and generates a 2D positional embedding using sine and cosine functions. + It is a wrapper of get_2d_sincos_pos_embed_from_grid. + Args: + - embed_dim: The embedding dimension. + - grid_size: The grid size. + Returns: + - pos_embed: The generated 2D positional embedding. + """ + if isinstance(grid_size, tuple): + grid_size_h, grid_size_w = grid_size + else: + grid_size_h = grid_size_w = grid_size + grid_h = torch.arange(grid_size_h, dtype=torch.float) + grid_w = torch.arange(grid_size_w, dtype=torch.float) + grid = torch.meshgrid(grid_w, grid_h, indexing="xy") + grid = torch.stack(grid, dim=0) + grid = grid.reshape([2, 1, grid_size_h, grid_size_w]) + pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid) + return pos_embed.reshape(1, grid_size_h, grid_size_w, -1).permute(0, 3, 1, 2) + + +def get_2d_sincos_pos_embed_from_grid( + embed_dim: int, grid: torch.Tensor +) -> torch.Tensor: + """ + This function generates a 2D positional embedding from a given grid using sine and cosine functions. + + Args: + - embed_dim: The embedding dimension. + - grid: The grid to generate the embedding from. + + Returns: + - emb: The generated 2D positional embedding. 
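+
+    Example (illustrative shapes only)::
+
+        grid = torch.zeros(2, 1, 4, 6)                      # (2, 1, H, W) grid of coordinates
+        emb = get_2d_sincos_pos_embed_from_grid(128, grid)  # (1, H * W, 128)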
+ """ + assert embed_dim % 2 == 0 + + # use half of dimensions to encode grid_h + emb_h = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[0]) # (H*W, D/2) + emb_w = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[1]) # (H*W, D/2) + + emb = torch.cat([emb_h, emb_w], dim=2) # (H*W, D) + return emb + + +def get_1d_sincos_pos_embed_from_grid( + embed_dim: int, pos: torch.Tensor +) -> torch.Tensor: + """ + This function generates a 1D positional embedding from a given grid using sine and cosine functions. + + Args: + - embed_dim: The embedding dimension. + - pos: The position to generate the embedding from. + + Returns: + - emb: The generated 1D positional embedding. + """ + assert embed_dim % 2 == 0 + omega = torch.arange(embed_dim // 2, dtype=torch.double) + omega /= embed_dim / 2.0 + omega = 1.0 / 10000**omega # (D/2,) + + pos = pos.reshape(-1) # (M,) + out = torch.einsum("m,d->md", pos, omega) # (M, D/2), outer product + + emb_sin = torch.sin(out) # (M, D/2) + emb_cos = torch.cos(out) # (M, D/2) + + emb = torch.cat([emb_sin, emb_cos], dim=1) # (M, D) + return emb[None].float() + + +def get_2d_embedding(xy: torch.Tensor, C: int, cat_coords: bool = True) -> torch.Tensor: + """ + This function generates a 2D positional embedding from given coordinates using sine and cosine functions. + + Args: + - xy: The coordinates to generate the embedding from. + - C: The size of the embedding. + - cat_coords: A flag to indicate whether to concatenate the original coordinates to the embedding. + + Returns: + - pe: The generated 2D positional embedding. + """ + B, N, D = xy.shape + assert D == 2 + + x = xy[:, :, 0:1] + y = xy[:, :, 1:2] + div_term = ( + torch.arange(0, C, 2, device=xy.device, dtype=torch.float32) * (1000.0 / C) + ).reshape(1, 1, int(C / 2)) + + pe_x = torch.zeros(B, N, C, device=xy.device, dtype=torch.float32) + pe_y = torch.zeros(B, N, C, device=xy.device, dtype=torch.float32) + + pe_x[:, :, 0::2] = torch.sin(x * div_term) + pe_x[:, :, 1::2] = torch.cos(x * div_term) + + pe_y[:, :, 0::2] = torch.sin(y * div_term) + pe_y[:, :, 1::2] = torch.cos(y * div_term) + + pe = torch.cat([pe_x, pe_y], dim=2) # (B, N, C*3) + if cat_coords: + pe = torch.cat([xy, pe], dim=2) # (B, N, C*3+3) + return pe diff --git a/cotracker/models/core/model_utils.py b/cotracker/models/core/model_utils.py index 321d1ee..12afd4e 100644 --- a/cotracker/models/core/model_utils.py +++ b/cotracker/models/core/model_utils.py @@ -1,256 +1,256 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import torch -import torch.nn.functional as F -from typing import Optional, Tuple - -EPS = 1e-6 - - -def smart_cat(tensor1, tensor2, dim): - if tensor1 is None: - return tensor2 - return torch.cat([tensor1, tensor2], dim=dim) - - -def get_points_on_a_grid( - size: int, - extent: Tuple[float, ...], - center: Optional[Tuple[float, ...]] = None, - device: Optional[torch.device] = torch.device("cpu"), -): - r"""Get a grid of points covering a rectangular region - - `get_points_on_a_grid(size, extent)` generates a :attr:`size` by - :attr:`size` grid fo points distributed to cover a rectangular area - specified by `extent`. - - The `extent` is a pair of integer :math:`(H,W)` specifying the height - and width of the rectangle. - - Optionally, the :attr:`center` can be specified as a pair :math:`(c_y,c_x)` - specifying the vertical and horizontal center coordinates. 
The center - defaults to the middle of the extent. - - Points are distributed uniformly within the rectangle leaving a margin - :math:`m=W/64` from the border. - - It returns a :math:`(1, \text{size} \times \text{size}, 2)` tensor of - points :math:`P_{ij}=(x_i, y_i)` where - - .. math:: - P_{ij} = \left( - c_x + m -\frac{W}{2} + \frac{W - 2m}{\text{size} - 1}\, j,~ - c_y + m -\frac{H}{2} + \frac{H - 2m}{\text{size} - 1}\, i - \right) - - Points are returned in row-major order. - - Args: - size (int): grid size. - extent (tuple): height and with of the grid extent. - center (tuple, optional): grid center. - device (str, optional): Defaults to `"cpu"`. - - Returns: - Tensor: grid. - """ - if size == 1: - return torch.tensor([extent[1] / 2, extent[0] / 2], device=device)[None, None] - - if center is None: - center = [extent[0] / 2, extent[1] / 2] - - margin = extent[1] / 64 - range_y = (margin - extent[0] / 2 + center[0], extent[0] / 2 + center[0] - margin) - range_x = (margin - extent[1] / 2 + center[1], extent[1] / 2 + center[1] - margin) - grid_y, grid_x = torch.meshgrid( - torch.linspace(*range_y, size, device=device), - torch.linspace(*range_x, size, device=device), - indexing="ij", - ) - return torch.stack([grid_x, grid_y], dim=-1).reshape(1, -1, 2) - - -def reduce_masked_mean(input, mask, dim=None, keepdim=False): - r"""Masked mean - - `reduce_masked_mean(x, mask)` computes the mean of a tensor :attr:`input` - over a mask :attr:`mask`, returning - - .. math:: - \text{output} = - \frac - {\sum_{i=1}^N \text{input}_i \cdot \text{mask}_i} - {\epsilon + \sum_{i=1}^N \text{mask}_i} - - where :math:`N` is the number of elements in :attr:`input` and - :attr:`mask`, and :math:`\epsilon` is a small constant to avoid - division by zero. - - `reduced_masked_mean(x, mask, dim)` computes the mean of a tensor - :attr:`input` over a mask :attr:`mask` along a dimension :attr:`dim`. - Optionally, the dimension can be kept in the output by setting - :attr:`keepdim` to `True`. Tensor :attr:`mask` must be broadcastable to - the same dimension as :attr:`input`. - - The interface is similar to `torch.mean()`. - - Args: - inout (Tensor): input tensor. - mask (Tensor): mask. - dim (int, optional): Dimension to sum over. Defaults to None. - keepdim (bool, optional): Keep the summed dimension. Defaults to False. - - Returns: - Tensor: mean tensor. - """ - - mask = mask.expand_as(input) - - prod = input * mask - - if dim is None: - numer = torch.sum(prod) - denom = torch.sum(mask) - else: - numer = torch.sum(prod, dim=dim, keepdim=keepdim) - denom = torch.sum(mask, dim=dim, keepdim=keepdim) - - mean = numer / (EPS + denom) - return mean - - -def bilinear_sampler(input, coords, align_corners=True, padding_mode="border"): - r"""Sample a tensor using bilinear interpolation - - `bilinear_sampler(input, coords)` samples a tensor :attr:`input` at - coordinates :attr:`coords` using bilinear interpolation. It is the same - as `torch.nn.functional.grid_sample()` but with a different coordinate - convention. - - The input tensor is assumed to be of shape :math:`(B, C, H, W)`, where - :math:`B` is the batch size, :math:`C` is the number of channels, - :math:`H` is the height of the image, and :math:`W` is the width of the - image. The tensor :attr:`coords` of shape :math:`(B, H_o, W_o, 2)` is - interpreted as an array of 2D point coordinates :math:`(x_i,y_i)`. - - Alternatively, the input tensor can be of size :math:`(B, C, T, H, W)`, - in which case sample points are triplets :math:`(t_i,x_i,y_i)`. 
Note - that in this case the order of the components is slightly different - from `grid_sample()`, which would expect :math:`(x_i,y_i,t_i)`. - - If `align_corners` is `True`, the coordinate :math:`x` is assumed to be - in the range :math:`[0,W-1]`, with 0 corresponding to the center of the - left-most image pixel :math:`W-1` to the center of the right-most - pixel. - - If `align_corners` is `False`, the coordinate :math:`x` is assumed to - be in the range :math:`[0,W]`, with 0 corresponding to the left edge of - the left-most pixel :math:`W` to the right edge of the right-most - pixel. - - Similar conventions apply to the :math:`y` for the range - :math:`[0,H-1]` and :math:`[0,H]` and to :math:`t` for the range - :math:`[0,T-1]` and :math:`[0,T]`. - - Args: - input (Tensor): batch of input images. - coords (Tensor): batch of coordinates. - align_corners (bool, optional): Coordinate convention. Defaults to `True`. - padding_mode (str, optional): Padding mode. Defaults to `"border"`. - - Returns: - Tensor: sampled points. - """ - - sizes = input.shape[2:] - - assert len(sizes) in [2, 3] - - if len(sizes) == 3: - # t x y -> x y t to match dimensions T H W in grid_sample - coords = coords[..., [1, 2, 0]] - - if align_corners: - coords = coords * torch.tensor( - [2 / max(size - 1, 1) for size in reversed(sizes)], device=coords.device - ) - else: - coords = coords * torch.tensor([2 / size for size in reversed(sizes)], device=coords.device) - - coords -= 1 - - return F.grid_sample(input, coords, align_corners=align_corners, padding_mode=padding_mode) - - -def sample_features4d(input, coords): - r"""Sample spatial features - - `sample_features4d(input, coords)` samples the spatial features - :attr:`input` represented by a 4D tensor :math:`(B, C, H, W)`. - - The field is sampled at coordinates :attr:`coords` using bilinear - interpolation. :attr:`coords` is assumed to be of shape :math:`(B, R, - 3)`, where each sample has the format :math:`(x_i, y_i)`. This uses the - same convention as :func:`bilinear_sampler` with `align_corners=True`. - - The output tensor has one feature per point, and has shape :math:`(B, - R, C)`. - - Args: - input (Tensor): spatial features. - coords (Tensor): points. - - Returns: - Tensor: sampled features. - """ - - B, _, _, _ = input.shape - - # B R 2 -> B R 1 2 - coords = coords.unsqueeze(2) - - # B C R 1 - feats = bilinear_sampler(input, coords) - - return feats.permute(0, 2, 1, 3).view( - B, -1, feats.shape[1] * feats.shape[3] - ) # B C R 1 -> B R C - - -def sample_features5d(input, coords): - r"""Sample spatio-temporal features - - `sample_features5d(input, coords)` works in the same way as - :func:`sample_features4d` but for spatio-temporal features and points: - :attr:`input` is a 5D tensor :math:`(B, T, C, H, W)`, :attr:`coords` is - a :math:`(B, R1, R2, 3)` tensor of spatio-temporal point :math:`(t_i, - x_i, y_i)`. The output tensor has shape :math:`(B, R1, R2, C)`. - - Args: - input (Tensor): spatio-temporal features. - coords (Tensor): spatio-temporal points. - - Returns: - Tensor: sampled features. - """ - - B, T, _, _, _ = input.shape - - # B T C H W -> B C T H W - input = input.permute(0, 2, 1, 3, 4) - - # B R1 R2 3 -> B R1 R2 1 3 - coords = coords.unsqueeze(3) - - # B C R1 R2 1 - feats = bilinear_sampler(input, coords) - - return feats.permute(0, 2, 3, 1, 4).view( - B, feats.shape[2], feats.shape[3], feats.shape[1] - ) # B C R1 R2 1 -> B R1 R2 C +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. 
+ +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import torch +import torch.nn.functional as F +from typing import Optional, Tuple + +EPS = 1e-6 + + +def smart_cat(tensor1, tensor2, dim): + if tensor1 is None: + return tensor2 + return torch.cat([tensor1, tensor2], dim=dim) + + +def get_points_on_a_grid( + size: int, + extent: Tuple[float, ...], + center: Optional[Tuple[float, ...]] = None, + device: Optional[torch.device] = torch.device("cpu"), +): + r"""Get a grid of points covering a rectangular region + + `get_points_on_a_grid(size, extent)` generates a :attr:`size` by + :attr:`size` grid fo points distributed to cover a rectangular area + specified by `extent`. + + The `extent` is a pair of integer :math:`(H,W)` specifying the height + and width of the rectangle. + + Optionally, the :attr:`center` can be specified as a pair :math:`(c_y,c_x)` + specifying the vertical and horizontal center coordinates. The center + defaults to the middle of the extent. + + Points are distributed uniformly within the rectangle leaving a margin + :math:`m=W/64` from the border. + + It returns a :math:`(1, \text{size} \times \text{size}, 2)` tensor of + points :math:`P_{ij}=(x_i, y_i)` where + + .. math:: + P_{ij} = \left( + c_x + m -\frac{W}{2} + \frac{W - 2m}{\text{size} - 1}\, j,~ + c_y + m -\frac{H}{2} + \frac{H - 2m}{\text{size} - 1}\, i + \right) + + Points are returned in row-major order. + + Args: + size (int): grid size. + extent (tuple): height and with of the grid extent. + center (tuple, optional): grid center. + device (str, optional): Defaults to `"cpu"`. + + Returns: + Tensor: grid. + """ + if size == 1: + return torch.tensor([extent[1] / 2, extent[0] / 2], device=device)[None, None] + + if center is None: + center = [extent[0] / 2, extent[1] / 2] + + margin = extent[1] / 64 + range_y = (margin - extent[0] / 2 + center[0], extent[0] / 2 + center[0] - margin) + range_x = (margin - extent[1] / 2 + center[1], extent[1] / 2 + center[1] - margin) + grid_y, grid_x = torch.meshgrid( + torch.linspace(*range_y, size, device=device), + torch.linspace(*range_x, size, device=device), + indexing="ij", + ) + return torch.stack([grid_x, grid_y], dim=-1).reshape(1, -1, 2) + + +def reduce_masked_mean(input, mask, dim=None, keepdim=False): + r"""Masked mean + + `reduce_masked_mean(x, mask)` computes the mean of a tensor :attr:`input` + over a mask :attr:`mask`, returning + + .. math:: + \text{output} = + \frac + {\sum_{i=1}^N \text{input}_i \cdot \text{mask}_i} + {\epsilon + \sum_{i=1}^N \text{mask}_i} + + where :math:`N` is the number of elements in :attr:`input` and + :attr:`mask`, and :math:`\epsilon` is a small constant to avoid + division by zero. + + `reduced_masked_mean(x, mask, dim)` computes the mean of a tensor + :attr:`input` over a mask :attr:`mask` along a dimension :attr:`dim`. + Optionally, the dimension can be kept in the output by setting + :attr:`keepdim` to `True`. Tensor :attr:`mask` must be broadcastable to + the same dimension as :attr:`input`. + + The interface is similar to `torch.mean()`. + + Args: + inout (Tensor): input tensor. + mask (Tensor): mask. + dim (int, optional): Dimension to sum over. Defaults to None. + keepdim (bool, optional): Keep the summed dimension. Defaults to False. + + Returns: + Tensor: mean tensor. 
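+
+    Example (illustrative)::
+
+        x = torch.tensor([1.0, 2.0, 3.0, 4.0])
+        m = torch.tensor([1.0, 1.0, 0.0, 0.0])
+        reduce_masked_mean(x, m)  # ~1.5, only the first two values are averaged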
+ """ + + mask = mask.expand_as(input) + + prod = input * mask + + if dim is None: + numer = torch.sum(prod) + denom = torch.sum(mask) + else: + numer = torch.sum(prod, dim=dim, keepdim=keepdim) + denom = torch.sum(mask, dim=dim, keepdim=keepdim) + + mean = numer / (EPS + denom) + return mean + + +def bilinear_sampler(input, coords, align_corners=True, padding_mode="border"): + r"""Sample a tensor using bilinear interpolation + + `bilinear_sampler(input, coords)` samples a tensor :attr:`input` at + coordinates :attr:`coords` using bilinear interpolation. It is the same + as `torch.nn.functional.grid_sample()` but with a different coordinate + convention. + + The input tensor is assumed to be of shape :math:`(B, C, H, W)`, where + :math:`B` is the batch size, :math:`C` is the number of channels, + :math:`H` is the height of the image, and :math:`W` is the width of the + image. The tensor :attr:`coords` of shape :math:`(B, H_o, W_o, 2)` is + interpreted as an array of 2D point coordinates :math:`(x_i,y_i)`. + + Alternatively, the input tensor can be of size :math:`(B, C, T, H, W)`, + in which case sample points are triplets :math:`(t_i,x_i,y_i)`. Note + that in this case the order of the components is slightly different + from `grid_sample()`, which would expect :math:`(x_i,y_i,t_i)`. + + If `align_corners` is `True`, the coordinate :math:`x` is assumed to be + in the range :math:`[0,W-1]`, with 0 corresponding to the center of the + left-most image pixel :math:`W-1` to the center of the right-most + pixel. + + If `align_corners` is `False`, the coordinate :math:`x` is assumed to + be in the range :math:`[0,W]`, with 0 corresponding to the left edge of + the left-most pixel :math:`W` to the right edge of the right-most + pixel. + + Similar conventions apply to the :math:`y` for the range + :math:`[0,H-1]` and :math:`[0,H]` and to :math:`t` for the range + :math:`[0,T-1]` and :math:`[0,T]`. + + Args: + input (Tensor): batch of input images. + coords (Tensor): batch of coordinates. + align_corners (bool, optional): Coordinate convention. Defaults to `True`. + padding_mode (str, optional): Padding mode. Defaults to `"border"`. + + Returns: + Tensor: sampled points. + """ + + sizes = input.shape[2:] + + assert len(sizes) in [2, 3] + + if len(sizes) == 3: + # t x y -> x y t to match dimensions T H W in grid_sample + coords = coords[..., [1, 2, 0]] + + if align_corners: + coords = coords * torch.tensor( + [2 / max(size - 1, 1) for size in reversed(sizes)], device=coords.device + ) + else: + coords = coords * torch.tensor([2 / size for size in reversed(sizes)], device=coords.device) + + coords -= 1 + + return F.grid_sample(input, coords, align_corners=align_corners, padding_mode=padding_mode) + + +def sample_features4d(input, coords): + r"""Sample spatial features + + `sample_features4d(input, coords)` samples the spatial features + :attr:`input` represented by a 4D tensor :math:`(B, C, H, W)`. + + The field is sampled at coordinates :attr:`coords` using bilinear + interpolation. :attr:`coords` is assumed to be of shape :math:`(B, R, + 3)`, where each sample has the format :math:`(x_i, y_i)`. This uses the + same convention as :func:`bilinear_sampler` with `align_corners=True`. + + The output tensor has one feature per point, and has shape :math:`(B, + R, C)`. + + Args: + input (Tensor): spatial features. + coords (Tensor): points. + + Returns: + Tensor: sampled features. 
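+
+    Example (illustrative shapes only)::
+
+        feats = torch.randn(2, 128, 96, 128)   # B C H W
+        pts = torch.rand(2, 10, 2) * 64        # B R 2, (x, y) points as consumed by the code
+        sample_features4d(feats, pts).shape    # torch.Size([2, 10, 128]), i.e. B R C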
+ """ + + B, _, _, _ = input.shape + + # B R 2 -> B R 1 2 + coords = coords.unsqueeze(2) + + # B C R 1 + feats = bilinear_sampler(input, coords) + + return feats.permute(0, 2, 1, 3).view( + B, -1, feats.shape[1] * feats.shape[3] + ) # B C R 1 -> B R C + + +def sample_features5d(input, coords): + r"""Sample spatio-temporal features + + `sample_features5d(input, coords)` works in the same way as + :func:`sample_features4d` but for spatio-temporal features and points: + :attr:`input` is a 5D tensor :math:`(B, T, C, H, W)`, :attr:`coords` is + a :math:`(B, R1, R2, 3)` tensor of spatio-temporal point :math:`(t_i, + x_i, y_i)`. The output tensor has shape :math:`(B, R1, R2, C)`. + + Args: + input (Tensor): spatio-temporal features. + coords (Tensor): spatio-temporal points. + + Returns: + Tensor: sampled features. + """ + + B, T, _, _, _ = input.shape + + # B T C H W -> B C T H W + input = input.permute(0, 2, 1, 3, 4) + + # B R1 R2 3 -> B R1 R2 1 3 + coords = coords.unsqueeze(3) + + # B C R1 R2 1 + feats = bilinear_sampler(input, coords) + + return feats.permute(0, 2, 3, 1, 4).view( + B, feats.shape[2], feats.shape[3], feats.shape[1] + ) # B C R1 R2 1 -> B R1 R2 C diff --git a/cotracker/models/evaluation_predictor.py b/cotracker/models/evaluation_predictor.py index 87f8e18..223eb3c 100644 --- a/cotracker/models/evaluation_predictor.py +++ b/cotracker/models/evaluation_predictor.py @@ -1,104 +1,104 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import torch -import torch.nn.functional as F -from typing import Tuple - -from cotracker.models.core.cotracker.cotracker import CoTracker2 -from cotracker.models.core.model_utils import get_points_on_a_grid - - -class EvaluationPredictor(torch.nn.Module): - def __init__( - self, - cotracker_model: CoTracker2, - interp_shape: Tuple[int, int] = (384, 512), - grid_size: int = 5, - local_grid_size: int = 8, - single_point: bool = True, - n_iters: int = 6, - ) -> None: - super(EvaluationPredictor, self).__init__() - self.grid_size = grid_size - self.local_grid_size = local_grid_size - self.single_point = single_point - self.interp_shape = interp_shape - self.n_iters = n_iters - - self.model = cotracker_model - self.model.eval() - - def forward(self, video, queries): - queries = queries.clone() - B, T, C, H, W = video.shape - B, N, D = queries.shape - - assert D == 3 - - video = video.reshape(B * T, C, H, W) - video = F.interpolate(video, tuple(self.interp_shape), mode="bilinear", align_corners=True) - video = video.reshape(B, T, 3, self.interp_shape[0], self.interp_shape[1]) - - device = video.device - - queries[:, :, 1] *= (self.interp_shape[1] - 1) / (W - 1) - queries[:, :, 2] *= (self.interp_shape[0] - 1) / (H - 1) - - if self.single_point: - traj_e = torch.zeros((B, T, N, 2), device=device) - vis_e = torch.zeros((B, T, N), device=device) - for pind in range((N)): - query = queries[:, pind : pind + 1] - - t = query[0, 0, 0].long() - - traj_e_pind, vis_e_pind = self._process_one_point(video, query) - traj_e[:, t:, pind : pind + 1] = traj_e_pind[:, :, :1] - vis_e[:, t:, pind : pind + 1] = vis_e_pind[:, :, :1] - else: - if self.grid_size > 0: - xy = get_points_on_a_grid(self.grid_size, video.shape[3:]) - xy = torch.cat([torch.zeros_like(xy[:, :, :1]), xy], dim=2).to(device) # - queries = torch.cat([queries, xy], dim=1) # - - traj_e, vis_e, __ = self.model( - video=video, - queries=queries, - 
iters=self.n_iters, - ) - - traj_e[:, :, :, 0] *= (W - 1) / float(self.interp_shape[1] - 1) - traj_e[:, :, :, 1] *= (H - 1) / float(self.interp_shape[0] - 1) - return traj_e, vis_e - - def _process_one_point(self, video, query): - t = query[0, 0, 0].long() - - device = query.device - if self.local_grid_size > 0: - xy_target = get_points_on_a_grid( - self.local_grid_size, - (50, 50), - [query[0, 0, 2].item(), query[0, 0, 1].item()], - ) - - xy_target = torch.cat([torch.zeros_like(xy_target[:, :, :1]), xy_target], dim=2).to( - device - ) # - query = torch.cat([query, xy_target], dim=1) # - - if self.grid_size > 0: - xy = get_points_on_a_grid(self.grid_size, video.shape[3:]) - xy = torch.cat([torch.zeros_like(xy[:, :, :1]), xy], dim=2).to(device) # - query = torch.cat([query, xy], dim=1) # - # crop the video to start from the queried frame - query[0, 0, 0] = 0 - traj_e_pind, vis_e_pind, __ = self.model( - video=video[:, t:], queries=query, iters=self.n_iters - ) - - return traj_e_pind, vis_e_pind +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import torch +import torch.nn.functional as F +from typing import Tuple + +from cotracker.models.core.cotracker.cotracker import CoTracker2 +from cotracker.models.core.model_utils import get_points_on_a_grid + + +class EvaluationPredictor(torch.nn.Module): + def __init__( + self, + cotracker_model: CoTracker2, + interp_shape: Tuple[int, int] = (384, 512), + grid_size: int = 5, + local_grid_size: int = 8, + single_point: bool = True, + n_iters: int = 6, + ) -> None: + super(EvaluationPredictor, self).__init__() + self.grid_size = grid_size + self.local_grid_size = local_grid_size + self.single_point = single_point + self.interp_shape = interp_shape + self.n_iters = n_iters + + self.model = cotracker_model + self.model.eval() + + def forward(self, video, queries): + queries = queries.clone() + B, T, C, H, W = video.shape + B, N, D = queries.shape + + assert D == 3 + + video = video.reshape(B * T, C, H, W) + video = F.interpolate(video, tuple(self.interp_shape), mode="bilinear", align_corners=True) + video = video.reshape(B, T, 3, self.interp_shape[0], self.interp_shape[1]) + + device = video.device + + queries[:, :, 1] *= (self.interp_shape[1] - 1) / (W - 1) + queries[:, :, 2] *= (self.interp_shape[0] - 1) / (H - 1) + + if self.single_point: + traj_e = torch.zeros((B, T, N, 2), device=device) + vis_e = torch.zeros((B, T, N), device=device) + for pind in range((N)): + query = queries[:, pind : pind + 1] + + t = query[0, 0, 0].long() + + traj_e_pind, vis_e_pind = self._process_one_point(video, query) + traj_e[:, t:, pind : pind + 1] = traj_e_pind[:, :, :1] + vis_e[:, t:, pind : pind + 1] = vis_e_pind[:, :, :1] + else: + if self.grid_size > 0: + xy = get_points_on_a_grid(self.grid_size, video.shape[3:]) + xy = torch.cat([torch.zeros_like(xy[:, :, :1]), xy], dim=2).to(device) # + queries = torch.cat([queries, xy], dim=1) # + + traj_e, vis_e, __ = self.model( + video=video, + queries=queries, + iters=self.n_iters, + ) + + traj_e[:, :, :, 0] *= (W - 1) / float(self.interp_shape[1] - 1) + traj_e[:, :, :, 1] *= (H - 1) / float(self.interp_shape[0] - 1) + return traj_e, vis_e + + def _process_one_point(self, video, query): + t = query[0, 0, 0].long() + + device = query.device + if self.local_grid_size > 0: + xy_target = get_points_on_a_grid( + self.local_grid_size, + (50, 50), + [query[0, 0, 2].item(), 
query[0, 0, 1].item()], + ) + + xy_target = torch.cat([torch.zeros_like(xy_target[:, :, :1]), xy_target], dim=2).to( + device + ) # + query = torch.cat([query, xy_target], dim=1) # + + if self.grid_size > 0: + xy = get_points_on_a_grid(self.grid_size, video.shape[3:]) + xy = torch.cat([torch.zeros_like(xy[:, :, :1]), xy], dim=2).to(device) # + query = torch.cat([query, xy], dim=1) # + # crop the video to start from the queried frame + query[0, 0, 0] = 0 + traj_e_pind, vis_e_pind, __ = self.model( + video=video[:, t:], queries=query, iters=self.n_iters + ) + + return traj_e_pind, vis_e_pind diff --git a/cotracker/predictor.py b/cotracker/predictor.py index 067b50d..9778a7e 100644 --- a/cotracker/predictor.py +++ b/cotracker/predictor.py @@ -1,275 +1,279 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import torch -import torch.nn.functional as F - -from cotracker.models.core.model_utils import smart_cat, get_points_on_a_grid -from cotracker.models.build_cotracker import build_cotracker - - -class CoTrackerPredictor(torch.nn.Module): - def __init__(self, checkpoint="./checkpoints/cotracker2.pth"): - super().__init__() - self.support_grid_size = 6 - model = build_cotracker(checkpoint) - self.interp_shape = model.model_resolution - print(self.interp_shape) - self.model = model - self.model.eval() - - @torch.no_grad() - def forward( - self, - video, # (B, T, 3, H, W) Batch_size, time, rgb, height, width - # input prompt types: - # - None. Dense tracks are computed in this case. You can adjust *query_frame* to compute tracks starting from a specific frame. - # *backward_tracking=True* will compute tracks in both directions. - # - queries. Queried points of shape (B, N, 3) in format (t, x, y) for frame index and pixel coordinates. - # - grid_size. Grid of N*N points from the first frame. if segm_mask is provided, then computed only for the mask. - # You can adjust *query_frame* and *backward_tracking* for the regular grid in the same way as for dense tracks. 
- queries: torch.Tensor = None, - segm_mask: torch.Tensor = None, # Segmentation mask of shape (B, 1, H, W) - grid_size: int = 0, - grid_query_frame: int = 0, # only for dense and regular grid tracks - backward_tracking: bool = False, - ): - if queries is None and grid_size == 0: - tracks, visibilities = self._compute_dense_tracks( - video, - grid_query_frame=grid_query_frame, - backward_tracking=backward_tracking, - ) - else: - tracks, visibilities = self._compute_sparse_tracks( - video, - queries, - segm_mask, - grid_size, - add_support_grid=(grid_size == 0 or segm_mask is not None), - grid_query_frame=grid_query_frame, - backward_tracking=backward_tracking, - ) - - return tracks, visibilities - - def _compute_dense_tracks(self, video, grid_query_frame, grid_size=80, backward_tracking=False): - *_, H, W = video.shape - grid_step = W // grid_size - grid_width = W // grid_step - grid_height = H // grid_step # set the whole video to grid_size number of grids - tracks = visibilities = None - grid_pts = torch.zeros((1, grid_width * grid_height, 3)).to(video.device) - # (batch_size, grid_number, t,x,y) - grid_pts[0, :, 0] = grid_query_frame - # iterate every grid - for offset in range(grid_step * grid_step): - print(f"step {offset} / {grid_step * grid_step}") - ox = offset % grid_step - oy = offset // grid_step - # initialize - # for example - # grid width = 4, grid height = 4, grid step = 10, ox = 1 - # torch.arange(grid_width) = [0,1,2,3] - # torch.arange(grid_width).repeat(grid_height) = [0,1,2,3,0,1,2,3,0,1,2,3] - # torch.arange(grid_width).repeat(grid_height) * grid_step = [0,10,20,30,0,10,20,30,0,10,20,30] - # get the location in the image - grid_pts[0, :, 1] = torch.arange(grid_width).repeat(grid_height) * grid_step + ox - grid_pts[0, :, 2] = ( - torch.arange(grid_height).repeat_interleave(grid_width) * grid_step + oy - ) - tracks_step, visibilities_step = self._compute_sparse_tracks( - video=video, - queries=grid_pts, - backward_tracking=backward_tracking, - ) - tracks = smart_cat(tracks, tracks_step, dim=2) - visibilities = smart_cat(visibilities, visibilities_step, dim=2) - - return tracks, visibilities - - def _compute_sparse_tracks( - self, - video, - queries, - segm_mask=None, - grid_size=0, - add_support_grid=False, - grid_query_frame=0, - backward_tracking=False, - ): - B, T, C, H, W = video.shape - - video = video.reshape(B * T, C, H, W) - # ? what is interpolate? - # 将video插值成interp_shape? 
- video = F.interpolate(video, tuple(self.interp_shape), mode="bilinear", align_corners=True) - video = video.reshape(B, T, 3, self.interp_shape[0], self.interp_shape[1]) - - if queries is not None: - B, N, D = queries.shape # batch_size, number of points, (t,x,y) - assert D == 3 - # query 缩放到( interp_shape - 1 ) / (W - 1) - # 插完值之后缩放 - queries = queries.clone() - queries[:, :, 1:] *= queries.new_tensor( - [ - (self.interp_shape[1] - 1) / (W - 1), - (self.interp_shape[0] - 1) / (H - 1), - ] - ) - # 生成grid - elif grid_size > 0: - grid_pts = get_points_on_a_grid(grid_size, self.interp_shape, device=video.device) - if segm_mask is not None: - segm_mask = F.interpolate(segm_mask, tuple(self.interp_shape), mode="nearest") - point_mask = segm_mask[0, 0][ - (grid_pts[0, :, 1]).round().long().cpu(), - (grid_pts[0, :, 0]).round().long().cpu(), - ].bool() - grid_pts = grid_pts[:, point_mask] - - queries = torch.cat( - [torch.ones_like(grid_pts[:, :, :1]) * grid_query_frame, grid_pts], - dim=2, - ).repeat(B, 1, 1) - - # 添加支持点 - - if add_support_grid: - grid_pts = get_points_on_a_grid( - self.support_grid_size, self.interp_shape, device=video.device - ) - grid_pts = torch.cat([torch.zeros_like(grid_pts[:, :, :1]), grid_pts], dim=2) - grid_pts = grid_pts.repeat(B, 1, 1) - queries = torch.cat([queries, grid_pts], dim=1) - - tracks, visibilities, __ = self.model.forward(video=video, queries=queries, iters=6) - - if backward_tracking: - tracks, visibilities = self._compute_backward_tracks( - video, queries, tracks, visibilities - ) - if add_support_grid: - queries[:, -self.support_grid_size**2 :, 0] = T - 1 - if add_support_grid: - tracks = tracks[:, :, : -self.support_grid_size**2] - visibilities = visibilities[:, :, : -self.support_grid_size**2] - thr = 0.9 - visibilities = visibilities > thr - - # correct query-point predictions - # see https://github.com/facebookresearch/co-tracker/issues/28 - - # TODO: batchify - for i in range(len(queries)): - queries_t = queries[i, : tracks.size(2), 0].to(torch.int64) - arange = torch.arange(0, len(queries_t)) - - # overwrite the predictions with the query points - tracks[i, queries_t, arange] = queries[i, : tracks.size(2), 1:] - - # correct visibilities, the query points should be visible - visibilities[i, queries_t, arange] = True - - tracks *= tracks.new_tensor( - [(W - 1) / (self.interp_shape[1] - 1), (H - 1) / (self.interp_shape[0] - 1)] - ) - return tracks, visibilities - - def _compute_backward_tracks(self, video, queries, tracks, visibilities): - inv_video = video.flip(1).clone() - inv_queries = queries.clone() - inv_queries[:, :, 0] = inv_video.shape[1] - inv_queries[:, :, 0] - 1 - - inv_tracks, inv_visibilities, __ = self.model(video=inv_video, queries=inv_queries, iters=6) - - inv_tracks = inv_tracks.flip(1) - inv_visibilities = inv_visibilities.flip(1) - arange = torch.arange(video.shape[1], device=queries.device)[None, :, None] - - mask = (arange < queries[:, None, :, 0]).unsqueeze(-1).repeat(1, 1, 1, 2) - - tracks[mask] = inv_tracks[mask] - visibilities[mask[:, :, :, 0]] = inv_visibilities[mask[:, :, :, 0]] - return tracks, visibilities - - -class CoTrackerOnlinePredictor(torch.nn.Module): - def __init__(self, checkpoint="./checkpoints/cotracker2.pth"): - super().__init__() - self.support_grid_size = 6 - model = build_cotracker(checkpoint) - self.interp_shape = model.model_resolution - self.step = model.window_len // 2 - self.model = model - self.model.eval() - - @torch.no_grad() - def forward( - self, - video_chunk, - is_first_step: bool = False, - 
queries: torch.Tensor = None, - grid_size: int = 10, - grid_query_frame: int = 0, - add_support_grid=False, - ): - B, T, C, H, W = video_chunk.shape - # Initialize online video processing and save queried points - # This needs to be done before processing *each new video* - if is_first_step: - self.model.init_video_online_processing() - if queries is not None: - B, N, D = queries.shape - assert D == 3 - queries = queries.clone() - queries[:, :, 1:] *= queries.new_tensor( - [ - (self.interp_shape[1] - 1) / (W - 1), - (self.interp_shape[0] - 1) / (H - 1), - ] - ) - elif grid_size > 0: - grid_pts = get_points_on_a_grid( - grid_size, self.interp_shape, device=video_chunk.device - ) - queries = torch.cat( - [torch.ones_like(grid_pts[:, :, :1]) * grid_query_frame, grid_pts], - dim=2, - ) - if add_support_grid: - grid_pts = get_points_on_a_grid( - self.support_grid_size, self.interp_shape, device=video_chunk.device - ) - grid_pts = torch.cat([torch.zeros_like(grid_pts[:, :, :1]), grid_pts], dim=2) - queries = torch.cat([queries, grid_pts], dim=1) - self.queries = queries - return (None, None) - - video_chunk = video_chunk.reshape(B * T, C, H, W) - video_chunk = F.interpolate( - video_chunk, tuple(self.interp_shape), mode="bilinear", align_corners=True - ) - video_chunk = video_chunk.reshape(B, T, 3, self.interp_shape[0], self.interp_shape[1]) - - tracks, visibilities, __ = self.model( - video=video_chunk, - queries=self.queries, - iters=6, - is_online=True, - ) - thr = 0.9 - return ( - tracks - * tracks.new_tensor( - [ - (W - 1) / (self.interp_shape[1] - 1), - (H - 1) / (self.interp_shape[0] - 1), - ] - ), - visibilities > thr, - ) +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import torch +import torch.nn.functional as F + +from cotracker.models.core.model_utils import smart_cat, get_points_on_a_grid +from cotracker.models.build_cotracker import build_cotracker + + +class CoTrackerPredictor(torch.nn.Module): + def __init__(self, checkpoint="./checkpoints/cotracker2.pth"): + super().__init__() + self.support_grid_size = 6 + model = build_cotracker(checkpoint) + self.interp_shape = model.model_resolution + print(self.interp_shape) + self.model = model + self.model.eval() + + @torch.no_grad() + def forward( + self, + video, # (B, T, 3, H, W) Batch_size, time, rgb, height, width + # input prompt types: + # - None. Dense tracks are computed in this case. You can adjust *query_frame* to compute tracks starting from a specific frame. + # *backward_tracking=True* will compute tracks in both directions. + # - queries. Queried points of shape (B, N, 3) in format (t, x, y) for frame index and pixel coordinates. + # - grid_size. Grid of N*N points from the first frame. if segm_mask is provided, then computed only for the mask. + # You can adjust *query_frame* and *backward_tracking* for the regular grid in the same way as for dense tracks. 
+        queries: torch.Tensor = None,
+        segm_mask: torch.Tensor = None,  # Segmentation mask of shape (B, 1, H, W)
+        grid_size: int = 0,
+        grid_query_frame: int = 0,  # only for dense and regular grid tracks
+        backward_tracking: bool = False,
+    ):
+        if queries is None and grid_size == 0:
+            tracks, visibilities = self._compute_dense_tracks(
+                video,
+                grid_query_frame=grid_query_frame,
+                backward_tracking=backward_tracking,
+            )
+        else:
+            tracks, visibilities = self._compute_sparse_tracks(
+                video,
+                queries,
+                segm_mask,
+                grid_size,
+                add_support_grid=(grid_size == 0 or segm_mask is not None),
+                grid_query_frame=grid_query_frame,
+                backward_tracking=backward_tracking,
+            )
+
+        return tracks, visibilities
+
+    # TODO: measure GPU inference time for dense tracking
+    # TODO: compare against RAFT on GPU
+    # TODO: check the resulting visual effects
+    # TODO: look into integrating RAFT
+    def _compute_dense_tracks(self, video, grid_query_frame, grid_size=80, backward_tracking=False):
+        *_, H, W = video.shape
+        grid_step = W // grid_size
+        grid_width = W // grid_step
+        grid_height = H // grid_step  # tile the frame into grid_step x grid_step pixel cells (about grid_size across)
+        tracks = visibilities = None
+        grid_pts = torch.zeros((1, grid_width * grid_height, 3)).to(video.device)
+        # (batch_size, grid_number, t, x, y)
+        grid_pts[0, :, 0] = grid_query_frame
+        # iterate over every pixel offset inside a grid cell
+        for offset in range(grid_step * grid_step):
+            print(f"step {offset} / {grid_step * grid_step}")
+            ox = offset % grid_step
+            oy = offset // grid_step
+            # initialize the (x, y) coordinates for this offset, for example:
+            # grid width = 4, grid height = 4, grid step = 10, ox = 1
+            # torch.arange(grid_width) = [0,1,2,3]
+            # torch.arange(grid_width).repeat(grid_height) = [0,1,2,3,0,1,2,3,0,1,2,3]
+            # torch.arange(grid_width).repeat(grid_height) * grid_step = [0,10,20,30,0,10,20,30,0,10,20,30]
+            # i.e. the pixel locations of the grid points in the image
+            grid_pts[0, :, 1] = torch.arange(grid_width).repeat(grid_height) * grid_step + ox
+            grid_pts[0, :, 2] = (
+                torch.arange(grid_height).repeat_interleave(grid_width) * grid_step + oy
+            )
+            tracks_step, visibilities_step = self._compute_sparse_tracks(
+                video=video,
+                queries=grid_pts,
+                backward_tracking=backward_tracking,
+            )
+            tracks = smart_cat(tracks, tracks_step, dim=2)
+            visibilities = smart_cat(visibilities, visibilities_step, dim=2)
+
+        return tracks, visibilities
+
+    def _compute_sparse_tracks(
+        self,
+        video,
+        queries,
+        segm_mask=None,
+        grid_size=0,
+        add_support_grid=False,
+        grid_query_frame=0,
+        backward_tracking=False,
+    ):
+        B, T, C, H, W = video.shape
+
+        video = video.reshape(B * T, C, H, W)
+        # bilinearly resize the video to the model resolution (interp_shape)
+        video = F.interpolate(video, tuple(self.interp_shape), mode="bilinear", align_corners=True)
+        video = video.reshape(B, T, 3, self.interp_shape[0], self.interp_shape[1])
+
+        if queries is not None:
+            B, N, D = queries.shape  # batch_size, number of points, (t, x, y)
+            assert D == 3
+            # rescale the query coordinates from the input resolution to the
+            # interpolated resolution, i.e. by (interp_shape - 1) / (W - 1)
+            queries = queries.clone()
+            queries[:, :, 1:] *= queries.new_tensor(
+                [
+                    (self.interp_shape[1] - 1) / (W - 1),
+                    (self.interp_shape[0] - 1) / (H - 1),
+                ]
+            )
+        # otherwise generate a regular grid of query points
+        elif grid_size > 0:
+            grid_pts = get_points_on_a_grid(grid_size, self.interp_shape, device=video.device)
+            if segm_mask is not None:
+                segm_mask = F.interpolate(segm_mask, tuple(self.interp_shape), mode="nearest")
+                point_mask = segm_mask[0, 0][
+                    (grid_pts[0, :, 1]).round().long().cpu(),
+                    (grid_pts[0, :, 0]).round().long().cpu(),
+                ].bool()
+                grid_pts = grid_pts[:, point_mask]
+
+            queries = torch.cat(
+                [torch.ones_like(grid_pts[:, :, :1]) * grid_query_frame, grid_pts],
+                dim=2,
+            ).repeat(B, 1, 1)
+
+        # add support points
+
+        if add_support_grid:
+            grid_pts = get_points_on_a_grid(
+                self.support_grid_size, self.interp_shape, device=video.device
+            )
+            grid_pts = torch.cat([torch.zeros_like(grid_pts[:, :, :1]), grid_pts], dim=2)
+            grid_pts = grid_pts.repeat(B, 1, 1)
+            queries = torch.cat([queries, grid_pts], dim=1)
+
+        tracks, visibilities, __ = self.model.forward(video=video, queries=queries, iters=6)
+
+        if backward_tracking:
+            tracks, visibilities = self._compute_backward_tracks(
+                video, queries, tracks, visibilities
+            )
+            if add_support_grid:
+                queries[:, -self.support_grid_size**2 :, 0] = T - 1
+        if add_support_grid:
+            tracks = tracks[:, :, : -self.support_grid_size**2]
+            visibilities = visibilities[:, :, : -self.support_grid_size**2]
+        thr = 0.9
+        visibilities = visibilities > thr
+
+        # correct query-point predictions
+        # see https://github.com/facebookresearch/co-tracker/issues/28
+
+        # TODO: batchify
+        for i in range(len(queries)):
+            queries_t = queries[i, : tracks.size(2), 0].to(torch.int64)
+            arange = torch.arange(0, len(queries_t))
+
+            # overwrite the predictions with the query points
+            tracks[i, queries_t, arange] = queries[i, : tracks.size(2), 1:]
+
+            # correct visibilities, the query points should be visible
+            visibilities[i, queries_t, arange] = True
+
+        tracks *= tracks.new_tensor(
+            [(W - 1) / (self.interp_shape[1] - 1), (H - 1) / (self.interp_shape[0] - 1)]
+        )
+        return tracks, visibilities
+
+    def _compute_backward_tracks(self, video, queries, tracks, visibilities):
+        inv_video = video.flip(1).clone()
+        inv_queries = queries.clone()
+        inv_queries[:, :, 0] = inv_video.shape[1] - inv_queries[:, :, 0] - 1
+
+        inv_tracks, inv_visibilities, __ = self.model(video=inv_video, queries=inv_queries, iters=6)
+
+        inv_tracks = inv_tracks.flip(1)
+        inv_visibilities = inv_visibilities.flip(1)
+        arange = torch.arange(video.shape[1], device=queries.device)[None, :, None]
+
+        mask = (arange < queries[:, None, :, 0]).unsqueeze(-1).repeat(1, 1, 1, 2)
+
+        tracks[mask] = inv_tracks[mask]
+        visibilities[mask[:, :, :, 0]] = inv_visibilities[mask[:, :, :, 0]]
+        return tracks, visibilities
+
+
+class CoTrackerOnlinePredictor(torch.nn.Module):
+    def __init__(self, checkpoint="./checkpoints/cotracker2.pth"):
+        super().__init__()
+        self.support_grid_size = 6
+        model = build_cotracker(checkpoint)
+        self.interp_shape = model.model_resolution
+        self.step = model.window_len // 2
+        self.model = model
+        self.model.eval()
+
+    @torch.no_grad()
+    def forward(
+        self,
+        video_chunk,
+        is_first_step: bool = False,
queries: torch.Tensor = None, + grid_size: int = 10, + grid_query_frame: int = 0, + add_support_grid=False, + ): + B, T, C, H, W = video_chunk.shape + # Initialize online video processing and save queried points + # This needs to be done before processing *each new video* + if is_first_step: + self.model.init_video_online_processing() + if queries is not None: + B, N, D = queries.shape + assert D == 3 + queries = queries.clone() + queries[:, :, 1:] *= queries.new_tensor( + [ + (self.interp_shape[1] - 1) / (W - 1), + (self.interp_shape[0] - 1) / (H - 1), + ] + ) + elif grid_size > 0: + grid_pts = get_points_on_a_grid( + grid_size, self.interp_shape, device=video_chunk.device + ) + queries = torch.cat( + [torch.ones_like(grid_pts[:, :, :1]) * grid_query_frame, grid_pts], + dim=2, + ) + if add_support_grid: + grid_pts = get_points_on_a_grid( + self.support_grid_size, self.interp_shape, device=video_chunk.device + ) + grid_pts = torch.cat([torch.zeros_like(grid_pts[:, :, :1]), grid_pts], dim=2) + queries = torch.cat([queries, grid_pts], dim=1) + self.queries = queries + return (None, None) + + video_chunk = video_chunk.reshape(B * T, C, H, W) + video_chunk = F.interpolate( + video_chunk, tuple(self.interp_shape), mode="bilinear", align_corners=True + ) + video_chunk = video_chunk.reshape(B, T, 3, self.interp_shape[0], self.interp_shape[1]) + + tracks, visibilities, __ = self.model( + video=video_chunk, + queries=self.queries, + iters=6, + is_online=True, + ) + thr = 0.9 + return ( + tracks + * tracks.new_tensor( + [ + (W - 1) / (self.interp_shape[1] - 1), + (H - 1) / (self.interp_shape[0] - 1), + ] + ), + visibilities > thr, + ) diff --git a/cotracker/utils/__init__.py b/cotracker/utils/__init__.py index 5277f46..4547e07 100644 --- a/cotracker/utils/__init__.py +++ b/cotracker/utils/__init__.py @@ -1,5 +1,5 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
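The two predictor classes above are easiest to follow from the call site. The following is a minimal offline usage sketch, not part of the patch itself: it assumes a checkpoint at ./checkpoints/cotracker2.pth and the demo clip used by the notebook further down, and grid_size=30 is an arbitrary choice.

import torch

from cotracker.predictor import CoTrackerPredictor
from cotracker.utils.visualizer import Visualizer, read_video_from_path

# decode the clip into a (1, T, 3, H, W) float tensor, as the demo notebook does
video = read_video_from_path("./assets/apple.mp4")
video = torch.from_numpy(video).permute(0, 3, 1, 2)[None].float()

model = CoTrackerPredictor(checkpoint="./checkpoints/cotracker2.pth")
if torch.cuda.is_available():
    model, video = model.cuda(), video.cuda()

# track a regular 30x30 grid of points seeded on the first frame
pred_tracks, pred_visibility = model(video, grid_size=30)  # (B, T, N, 2) and (B, T, N)

vis = Visualizer(save_dir="./videos")
vis.visualize(video=video, tracks=pred_tracks, visibility=pred_visibility, filename="teaser")

Internally the predictor resizes the clip to model.model_resolution, rescales the query coordinates into that resolution, and maps the returned tracks back to the original width and height; that is what the (W - 1) / (interp_shape[1] - 1) factors in _compute_sparse_tracks implement.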
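CoTrackerOnlinePredictor is meant to be driven incrementally: the first call with is_first_step=True only registers the queries and returns (None, None), and each later call should receive the most recent 2 * model.step frames. The sketch below is again not part of the patch and rests on those assumptions; the clip path is a placeholder and the buffering loop is one plausible way to feed the model, following the self.step set in __init__.

import numpy as np
import torch

from cotracker.predictor import CoTrackerOnlinePredictor
from cotracker.utils.visualizer import read_video_from_path

model = CoTrackerOnlinePredictor(checkpoint="./checkpoints/cotracker2.pth")

def process_chunk(window_frames, is_first_step):
    # stack the most recent 2 * step frames into a (1, T, 3, H, W) float tensor
    chunk = torch.from_numpy(np.stack(window_frames[-model.step * 2 :])).float()
    chunk = chunk.permute(0, 3, 1, 2)[None]
    return model(chunk, is_first_step=is_first_step, grid_size=10)

window_frames, is_first_step = [], True
pred_tracks = pred_visibility = None
for i, frame in enumerate(read_video_from_path("./assets/apple.mp4")):
    window_frames.append(frame)
    if i % model.step == 0 and i != 0:
        pred_tracks, pred_visibility = process_chunk(window_frames, is_first_step)
        is_first_step = False  # queries are cached inside the predictor after the first call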
diff --git a/cotracker/utils/__pycache__/__init__.cpython-38.pyc b/cotracker/utils/__pycache__/__init__.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2e143587ac2870b58ad8b896f16718438f9c4111
GIT binary patch
(compiled-bytecode literal omitted)

diff --git a/cotracker/utils/__pycache__/__init__.cpython-39.pyc b/cotracker/utils/__pycache__/__init__.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..57b947a14ea58f55135c20e556356af6749ddfa7
GIT binary patch
(compiled-bytecode literal omitted)

diff --git a/cotracker/utils/__pycache__/visualizer.cpython-38.pyc b/cotracker/utils/__pycache__/visualizer.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7f8f759c29633d74c2349d963141034c5cc94acf
GIT binary patch
(compiled-bytecode literal omitted)

diff --git a/cotracker/utils/__pycache__/visualizer.cpython-39.pyc b/cotracker/utils/__pycache__/visualizer.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f029a3e5ae0ea779ccaa673ca8490ff5b736336e
GIT binary patch
(compiled-bytecode literal omitted)
z|AcF9rY>|10>1%#m%fK&f_)ZtWPz-XojSVT_xpte4nNuonHD=@lR6_^bp);5FJ?Hw zU?ooV8{v-o&LFJCII=GC7GNYuGnj&pw1^-T;U~06KFLwSW6wK6fVzJR5N5t($ydhb zN{TogLRt!HG(>g~a0T-n=@d4`y)y1YQ9+gznfU!$D^*K!57g5_C+`zP3L%rZn4k_7 z2UJiVPin#Y96>s%LD@4c@|Z`*mY;HEGNn#`N5$WwP%MVKJS9@{3)G}V;Ijl#D07oKllGfm`Uw{Lh)Y9eLExc??zLt5ixuwCs%yYjQg+cZxS8o8vonolW1b&|yh>4Vg?AC5_TfUH)?xEMK#7sIXVHcse z5OmRlM7_s0@Qj%W!P!$QcN*h@y$)BOW2=b?tGe=wSR>1~$}a)TEImcrS(Qjr&2MdO z>2Pk8OCsd<4gA%VNv2KBgu8)A`3E#$P9bYZnY(&jd37Y>M6XS7v0mRtvSrj_#)*Xa zN1ij2+p8EVzl}mU+sNicQocu+x2PbilDPo6P>lYA(q>zd#7Uymlqk+qb%Bb@C`bXs zB-^futj6^7E6$GzsG(mK*G}}H-oNCqB;#i?;+MruZ++7De 0, 1].min(), - tracks[0, segm_mask > 0, 1].max(), - ) - norm = plt.Normalize(y_min, y_max) - for n in range(N): - if segm_mask[n] > 0: - color = self.color_map(norm(tracks[0, n, 1])) - color = np.array(color[:3])[None] * 255 - vector_colors[:, n] = np.repeat(color, T, axis=0) - - else: - # color changes with segm class - segm_mask = segm_mask.cpu() - color = np.zeros((segm_mask.shape[0], 3), dtype=np.float32) - color[segm_mask > 0] = np.array(self.color_map(1.0)[:3]) * 255.0 - color[segm_mask <= 0] = np.array(self.color_map(0.0)[:3]) * 255.0 - vector_colors = np.repeat(color[None], T, axis=0) - - # draw tracks - if self.tracks_leave_trace != 0: - for t in range(query_frame + 1, T): - first_ind = ( - max(0, t - self.tracks_leave_trace) if self.tracks_leave_trace >= 0 else 0 - ) - curr_tracks = tracks[first_ind : t + 1] - curr_colors = vector_colors[first_ind : t + 1] - if compensate_for_camera_motion: - diff = ( - tracks[first_ind : t + 1, segm_mask <= 0] - - tracks[t : t + 1, segm_mask <= 0] - ).mean(1)[:, None] - - curr_tracks = curr_tracks - diff - curr_tracks = curr_tracks[:, segm_mask > 0] - curr_colors = curr_colors[:, segm_mask > 0] - - res_video[t] = self._draw_pred_tracks( - res_video[t], - curr_tracks, - curr_colors, - ) - if gt_tracks is not None: - res_video[t] = self._draw_gt_tracks(res_video[t], gt_tracks[first_ind : t + 1]) - - # draw points - for t in range(query_frame, T): - img = Image.fromarray(np.uint8(res_video[t])) - for i in range(N): - coord = (tracks[t, i, 0], tracks[t, i, 1]) - visibile = True - if visibility is not None: - visibile = visibility[0, t, i] - if coord[0] != 0 and coord[1] != 0: - if not compensate_for_camera_motion or ( - compensate_for_camera_motion and segm_mask[i] > 0 - ): - img = draw_circle( - img, - coord=coord, - radius=int(self.linewidth * 2), - color=vector_colors[t, i].astype(int), - visible=visibile, - ) - res_video[t] = np.array(img) - - # construct the final rgb sequence - if self.show_first_frame > 0: - res_video = [res_video[0]] * self.show_first_frame + res_video[1:] - return torch.from_numpy(np.stack(res_video)).permute(0, 3, 1, 2)[None].byte() - - def _draw_pred_tracks( - self, - rgb: np.ndarray, # H x W x 3 - tracks: np.ndarray, # T x 2 - vector_colors: np.ndarray, - alpha: float = 0.5, - ): - T, N, _ = tracks.shape - rgb = Image.fromarray(np.uint8(rgb)) - for s in range(T - 1): - vector_color = vector_colors[s] - original = rgb.copy() - alpha = (s / T) ** 2 - for i in range(N): - coord_y = (int(tracks[s, i, 0]), int(tracks[s, i, 1])) - coord_x = (int(tracks[s + 1, i, 0]), int(tracks[s + 1, i, 1])) - if coord_y[0] != 0 and coord_y[1] != 0: - rgb = draw_line( - rgb, - coord_y, - coord_x, - vector_color[i].astype(int), - self.linewidth, - ) - if self.tracks_leave_trace > 0: - rgb = Image.fromarray( - np.uint8(add_weighted(np.array(rgb), alpha, np.array(original), 1 - alpha, 0)) - ) - rgb = np.array(rgb) - return rgb - - def _draw_gt_tracks( - 
self, - rgb: np.ndarray, # H x W x 3, - gt_tracks: np.ndarray, # T x 2 - ): - T, N, _ = gt_tracks.shape - color = np.array((211, 0, 0)) - rgb = Image.fromarray(np.uint8(rgb)) - for t in range(T): - for i in range(N): - gt_tracks = gt_tracks[t][i] - # draw a red cross - if gt_tracks[0] > 0 and gt_tracks[1] > 0: - length = self.linewidth * 3 - coord_y = (int(gt_tracks[0]) + length, int(gt_tracks[1]) + length) - coord_x = (int(gt_tracks[0]) - length, int(gt_tracks[1]) - length) - rgb = draw_line( - rgb, - coord_y, - coord_x, - color, - self.linewidth, - ) - coord_y = (int(gt_tracks[0]) - length, int(gt_tracks[1]) + length) - coord_x = (int(gt_tracks[0]) + length, int(gt_tracks[1]) - length) - rgb = draw_line( - rgb, - coord_y, - coord_x, - color, - self.linewidth, - ) - rgb = np.array(rgb) - return rgb +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. +import os +import numpy as np +import imageio +import torch + +from matplotlib import cm +import torch.nn.functional as F +import torchvision.transforms as transforms +import matplotlib.pyplot as plt +from PIL import Image, ImageDraw + + +def read_video_from_path(path): + try: + reader = imageio.get_reader(path) + except Exception as e: + print("Error opening video file: ", e) + return None + frames = [] + for i, im in enumerate(reader): + frames.append(np.array(im)) + return np.stack(frames) + + +def draw_circle(rgb, coord, radius, color=(255, 0, 0), visible=True): + # Create a draw object + draw = ImageDraw.Draw(rgb) + # Calculate the bounding box of the circle + left_up_point = (coord[0] - radius, coord[1] - radius) + right_down_point = (coord[0] + radius, coord[1] + radius) + # Draw the circle + draw.ellipse( + [left_up_point, right_down_point], + fill=tuple(color) if visible else None, + outline=tuple(color), + ) + return rgb + + +def draw_line(rgb, coord_y, coord_x, color, linewidth): + draw = ImageDraw.Draw(rgb) + draw.line( + (coord_y[0], coord_y[1], coord_x[0], coord_x[1]), + fill=tuple(color), + width=linewidth, + ) + return rgb + + +def add_weighted(rgb, alpha, original, beta, gamma): + return (rgb * alpha + original * beta + gamma).astype("uint8") + + +class Visualizer: + def __init__( + self, + save_dir: str = "./results", + grayscale: bool = False, + pad_value: int = 0, + fps: int = 10, + mode: str = "rainbow", # 'cool', 'optical_flow' + linewidth: int = 2, + show_first_frame: int = 10, + tracks_leave_trace: int = 0, # -1 for infinite + ): + self.mode = mode + self.save_dir = save_dir + if mode == "rainbow": + self.color_map = cm.get_cmap("gist_rainbow") + elif mode == "cool": + self.color_map = cm.get_cmap(mode) + self.show_first_frame = show_first_frame + self.grayscale = grayscale + self.tracks_leave_trace = tracks_leave_trace + self.pad_value = pad_value + self.linewidth = linewidth + self.fps = fps + + def visualize( + self, + video: torch.Tensor, # (B,T,C,H,W) + tracks: torch.Tensor, # (B,T,N,2) + visibility: torch.Tensor = None, # (B, T, N, 1) bool + gt_tracks: torch.Tensor = None, # (B,T,N,2) + segm_mask: torch.Tensor = None, # (B,1,H,W) + filename: str = "video", + writer=None, # tensorboard Summary Writer, used for visualization during training + step: int = 0, + query_frame: int = 0, + save_video: bool = True, + compensate_for_camera_motion: bool = False, + ): + if compensate_for_camera_motion: + assert segm_mask is not None + if segm_mask is not None: + coords = 
tracks[0, query_frame].round().long() + segm_mask = segm_mask[0, query_frame][coords[:, 1], coords[:, 0]].long() + + video = F.pad( + video, + (self.pad_value, self.pad_value, self.pad_value, self.pad_value), + "constant", + 255, + ) + tracks = tracks + self.pad_value + + if self.grayscale: + transform = transforms.Grayscale() + video = transform(video) + video = video.repeat(1, 1, 3, 1, 1) + + res_video = self.draw_tracks_on_video( + video=video, + tracks=tracks, + visibility=visibility, + segm_mask=segm_mask, + gt_tracks=gt_tracks, + query_frame=query_frame, + compensate_for_camera_motion=compensate_for_camera_motion, + ) + if save_video: + self.save_video(res_video, filename=filename, writer=writer, step=step) + return res_video + + def save_video(self, video, filename, writer=None, step=0): + if writer is not None: + writer.add_video( + filename, + video.to(torch.uint8), + global_step=step, + fps=self.fps, + ) + else: + os.makedirs(self.save_dir, exist_ok=True) + wide_list = list(video.unbind(1)) + wide_list = [wide[0].permute(1, 2, 0).cpu().numpy() for wide in wide_list] + + # Prepare the video file path + save_path = os.path.join(self.save_dir, f"{filename}.mp4") + + # Create a writer object + video_writer = imageio.get_writer(save_path, fps=self.fps) + + # Write frames to the video file + for frame in wide_list[2:-1]: + video_writer.append_data(frame) + + video_writer.close() + + print(f"Video saved to {save_path}") + + def draw_tracks_on_video( + self, + video: torch.Tensor, + tracks: torch.Tensor, + visibility: torch.Tensor = None, + segm_mask: torch.Tensor = None, + gt_tracks=None, + query_frame: int = 0, + compensate_for_camera_motion=False, + ): + B, T, C, H, W = video.shape + _, _, N, D = tracks.shape + + assert D == 2 + assert C == 3 + video = video[0].permute(0, 2, 3, 1).byte().detach().cpu().numpy() # S, H, W, C + tracks = tracks[0].long().detach().cpu().numpy() # S, N, 2 + if gt_tracks is not None: + gt_tracks = gt_tracks[0].detach().cpu().numpy() + + res_video = [] + + # process input video + for rgb in video: + res_video.append(rgb.copy()) + vector_colors = np.zeros((T, N, 3)) + + if self.mode == "optical_flow": + import flow_vis + + vector_colors = flow_vis.flow_to_color(tracks - tracks[query_frame][None]) + elif segm_mask is None: + if self.mode == "rainbow": + y_min, y_max = ( + tracks[query_frame, :, 1].min(), + tracks[query_frame, :, 1].max(), + ) + norm = plt.Normalize(y_min, y_max) + for n in range(N): + color = self.color_map(norm(tracks[query_frame, n, 1])) + color = np.array(color[:3])[None] * 255 + vector_colors[:, n] = np.repeat(color, T, axis=0) + else: + # color changes with time + for t in range(T): + color = np.array(self.color_map(t / T)[:3])[None] * 255 + vector_colors[t] = np.repeat(color, N, axis=0) + else: + if self.mode == "rainbow": + vector_colors[:, segm_mask <= 0, :] = 255 + + y_min, y_max = ( + tracks[0, segm_mask > 0, 1].min(), + tracks[0, segm_mask > 0, 1].max(), + ) + norm = plt.Normalize(y_min, y_max) + for n in range(N): + if segm_mask[n] > 0: + color = self.color_map(norm(tracks[0, n, 1])) + color = np.array(color[:3])[None] * 255 + vector_colors[:, n] = np.repeat(color, T, axis=0) + + else: + # color changes with segm class + segm_mask = segm_mask.cpu() + color = np.zeros((segm_mask.shape[0], 3), dtype=np.float32) + color[segm_mask > 0] = np.array(self.color_map(1.0)[:3]) * 255.0 + color[segm_mask <= 0] = np.array(self.color_map(0.0)[:3]) * 255.0 + vector_colors = np.repeat(color[None], T, axis=0) + + # draw tracks + if 
self.tracks_leave_trace != 0: + for t in range(query_frame + 1, T): + first_ind = ( + max(0, t - self.tracks_leave_trace) if self.tracks_leave_trace >= 0 else 0 + ) + curr_tracks = tracks[first_ind : t + 1] + curr_colors = vector_colors[first_ind : t + 1] + if compensate_for_camera_motion: + diff = ( + tracks[first_ind : t + 1, segm_mask <= 0] + - tracks[t : t + 1, segm_mask <= 0] + ).mean(1)[:, None] + + curr_tracks = curr_tracks - diff + curr_tracks = curr_tracks[:, segm_mask > 0] + curr_colors = curr_colors[:, segm_mask > 0] + + res_video[t] = self._draw_pred_tracks( + res_video[t], + curr_tracks, + curr_colors, + ) + if gt_tracks is not None: + res_video[t] = self._draw_gt_tracks(res_video[t], gt_tracks[first_ind : t + 1]) + + # draw points + for t in range(query_frame, T): + img = Image.fromarray(np.uint8(res_video[t])) + for i in range(N): + coord = (tracks[t, i, 0], tracks[t, i, 1]) + visibile = True + if visibility is not None: + visibile = visibility[0, t, i] + if coord[0] != 0 and coord[1] != 0: + if not compensate_for_camera_motion or ( + compensate_for_camera_motion and segm_mask[i] > 0 + ): + img = draw_circle( + img, + coord=coord, + radius=int(self.linewidth * 2), + color=vector_colors[t, i].astype(int), + visible=visibile, + ) + res_video[t] = np.array(img) + + # construct the final rgb sequence + if self.show_first_frame > 0: + res_video = [res_video[0]] * self.show_first_frame + res_video[1:] + return torch.from_numpy(np.stack(res_video)).permute(0, 3, 1, 2)[None].byte() + + def _draw_pred_tracks( + self, + rgb: np.ndarray, # H x W x 3 + tracks: np.ndarray, # T x 2 + vector_colors: np.ndarray, + alpha: float = 0.5, + ): + T, N, _ = tracks.shape + rgb = Image.fromarray(np.uint8(rgb)) + for s in range(T - 1): + vector_color = vector_colors[s] + original = rgb.copy() + alpha = (s / T) ** 2 + for i in range(N): + coord_y = (int(tracks[s, i, 0]), int(tracks[s, i, 1])) + coord_x = (int(tracks[s + 1, i, 0]), int(tracks[s + 1, i, 1])) + if coord_y[0] != 0 and coord_y[1] != 0: + rgb = draw_line( + rgb, + coord_y, + coord_x, + vector_color[i].astype(int), + self.linewidth, + ) + if self.tracks_leave_trace > 0: + rgb = Image.fromarray( + np.uint8(add_weighted(np.array(rgb), alpha, np.array(original), 1 - alpha, 0)) + ) + rgb = np.array(rgb) + return rgb + + def _draw_gt_tracks( + self, + rgb: np.ndarray, # H x W x 3, + gt_tracks: np.ndarray, # T x 2 + ): + T, N, _ = gt_tracks.shape + color = np.array((211, 0, 0)) + rgb = Image.fromarray(np.uint8(rgb)) + for t in range(T): + for i in range(N): + gt_tracks = gt_tracks[t][i] + # draw a red cross + if gt_tracks[0] > 0 and gt_tracks[1] > 0: + length = self.linewidth * 3 + coord_y = (int(gt_tracks[0]) + length, int(gt_tracks[1]) + length) + coord_x = (int(gt_tracks[0]) - length, int(gt_tracks[1]) - length) + rgb = draw_line( + rgb, + coord_y, + coord_x, + color, + self.linewidth, + ) + coord_y = (int(gt_tracks[0]) - length, int(gt_tracks[1]) + length) + coord_x = (int(gt_tracks[0]) + length, int(gt_tracks[1]) - length) + rgb = draw_line( + rgb, + coord_y, + coord_x, + color, + self.linewidth, + ) + rgb = np.array(rgb) + return rgb diff --git a/cotracker/version.py b/cotracker/version.py index 4bdf9b4..d1cdb8f 100644 --- a/cotracker/version.py +++ b/cotracker/version.py @@ -1,8 +1,8 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
- - -__version__ = "2.0.0" +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + + +__version__ = "2.0.0" diff --git a/notebooks/demo.ipynb b/notebooks/demo.ipynb index 2a09fef..0a3dcfe 100644 --- a/notebooks/demo.ipynb +++ b/notebooks/demo.ipynb @@ -54,21 +54,34 @@ "metadata": {}, "outputs": [], "source": [ - "!git clone https://github.com/facebookresearch/co-tracker\n", - "%cd co-tracker\n", - "!pip install -e .\n", - "!pip install opencv-python einops timm matplotlib moviepy flow_vis\n", - "!mkdir checkpoints\n", - "%cd checkpoints\n", - "!wget https://huggingface.co/facebook/cotracker/resolve/main/cotracker2.pth" + "# !git clone https://github.com/facebookresearch/co-tracker\n", + "# %cd co-tracker\n", + "# !pip install -e .\n", + "# !pip install opencv-python einops timm matplotlib moviepy flow_vis\n", + "# !mkdir checkpoints\n", + "# %cd checkpoints\n", + "# !wget https://huggingface.co/facebook/cotracker/resolve/main/cotracker2.pth" ] }, { "cell_type": "code", "execution_count": 2, "id": "1745a859-71d4-4ec3-8ef3-027cabe786d4", - "metadata": {}, - "outputs": [], + "metadata": { + "ExecuteTime": { + "end_time": "2024-07-29T20:52:14.487553Z", + "start_time": "2024-07-29T20:52:12.423999Z" + } + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "/mnt/d/cotracker\n" + ] + } + ], "source": [ "%cd ..\n", "import os\n", @@ -79,6 +92,30 @@ "from IPython.display import HTML" ] }, + { + "cell_type": "code", + "execution_count": 3, + "id": "44342f62abc0ec1e", + "metadata": { + "ExecuteTime": { + "end_time": "2024-07-29T20:52:31.688043Z", + "start_time": "2024-07-29T20:52:31.668043Z" + } + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "CUDA available\n" + ] + } + ], + "source": [ + "if torch.cuda.is_available():\n", + " print('CUDA available')" + ] + }, { "cell_type": "markdown", "id": "7894bd2d-2099-46fa-8286-f0c56298ecd1", @@ -89,31 +126,31 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 4, "id": "f1f9ca4d-951e-49d2-8844-91f7bcadfecd", "metadata": {}, "outputs": [], "source": [ - "video = read_video_from_path('./assets/apple.mp4')\n", + "video = read_video_from_path('./assets/F1_shorts.mp4')\n", "video = torch.from_numpy(video).permute(0, 3, 1, 2)[None].float()" ] }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 5, "id": "fb4c2e9d-0e85-4c10-81a2-827d0759bf87", "metadata": {}, "outputs": [ { "data": { "text/html": [ - "" + "" ], "text/plain": [ "" ] }, - "execution_count": 4, + "execution_count": 5, "metadata": {}, "output_type": "execute_result" } @@ -124,7 +161,7 @@ " video_url = f\"data:video/mp4;base64,{b64encode(video_file).decode()}\"\n", " return HTML(f\"\"\"\"\"\")\n", " \n", - "show_video(\"./assets/apple.mp4\")" + "show_video(\"./assets/F1_shorts.mp4\")" ] }, { @@ -137,10 +174,26 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 7, "id": "d59ac40b-bde8-46d4-bd57-4ead939f22ca", "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/mnt/d/cotracker/cotracker/models/build_cotracker.py:29: FutureWarning: You are using `torch.load` with `weights_only=False` (the current default value), which uses the default pickle module implicitly. 
It is possible to construct malicious pickle data which will execute arbitrary code during unpickling (See https://github.com/pytorch/pytorch/blob/main/SECURITY.md#untrusted-models for more details). In a future release, the default value for `weights_only` will be flipped to `True`. This limits the functions that could be executed during unpickling. Arbitrary objects will no longer be allowed to be loaded via this mode unless they are explicitly allowlisted by the user via `torch.serialization.add_safe_globals`. We recommend you start setting `weights_only=True` for any use case where you don't have full control of the loaded file. Please open an issue on GitHub for any issues related to this experimental feature.\n", + " state_dict = torch.load(f, map_location=\"cpu\")\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "(384, 512)\n" + ] + } + ], "source": [ "from cotracker.predictor import CoTrackerPredictor\n", "\n", @@ -151,18 +204,6 @@ ")" ] }, - { - "cell_type": "code", - "execution_count": 6, - "id": "3f2a4485", - "metadata": {}, - "outputs": [], - "source": [ - "if torch.cuda.is_available():\n", - " model = model.cuda()\n", - " video = video.cuda()" - ] - }, { "cell_type": "markdown", "id": "e8398155-6dae-4ff0-95f3-dbb52ac70d20", @@ -173,10 +214,22 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 8, "id": "17fcaae9-7b3c-474c-977a-cce08a09d580", "metadata": {}, - "outputs": [], + "outputs": [ + { + "ename": "", + "evalue": "", + "output_type": "error", + "traceback": [ + "\u001b[1;31m在当前单元格或上一个单元格中执行代码时 Kernel 崩溃。\n", + "\u001b[1;31m请查看单元格中的代码,以确定故障的可能原因。\n", + "\u001b[1;31m单击此处了解详细信息。\n", + "\u001b[1;31m有关更多详细信息,请查看 Jupyter log。" + ] + } + ], "source": [ "pred_tracks, pred_visibility = model(video, grid_size=30)" ] @@ -191,23 +244,19 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 7, "id": "7e793ce0-7b77-46ca-a629-155a6a146000", "metadata": {}, "outputs": [ { - "name": "stderr", - "output_type": "stream", - "text": [ - "IMAGEIO FFMPEG_WRITER WARNING: input image is not divisible by macro_block_size=16, resizing from (1496, 920) to (1504, 928) to ensure video compatibility with most codecs and players. To prevent resizing, make your input image divisible by the macro_block_size or set the macro_block_size to 1 (risking incompatibility).\n", - "[swscaler @ 0x5e9d040] Warning: data is not aligned! 
This can lead to a speed loss\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Video saved to ./videos/teaser.mp4\n" + "ename": "", + "evalue": "", + "output_type": "error", + "traceback": [ + "\u001b[1;31m在当前单元格或上一个单元格中执行代码时 Kernel 崩溃。\n", + "\u001b[1;31m请查看单元格中的代码,以确定故障的可能原因。\n", + "\u001b[1;31m单击此处了解详细信息。\n", + "\u001b[1;31m有关更多详细信息,请查看 Jupyter log。" ] } ], @@ -218,22 +267,20 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 1, "id": "2d0733ba-8fe1-4cd4-b963-2085202fba13", "metadata": {}, "outputs": [ { - "data": { - "text/html": [ - "" - ], - "text/plain": [ - "" - ] - }, - "execution_count": 9, - "metadata": {}, - "output_type": "execute_result" + "ename": "NameError", + "evalue": "name 'show_video' is not defined", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)", + "Cell \u001b[0;32mIn[1], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m \u001b[43mshow_video\u001b[49m(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m./videos/teaser.mp4\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n", + "\u001b[0;31mNameError\u001b[0m: name 'show_video' is not defined" + ] } ], "source": [ @@ -264,7 +311,7 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": null, "id": "c6422e7c-8c6f-4269-92c3-245344afe35b", "metadata": {}, "outputs": [], @@ -289,21 +336,10 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": null, "id": "d7141079-d7e0-40b3-b031-a28879c4bd6d", "metadata": {}, - "outputs": [ - { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAn4AAAHVCAYAAABv4/bQAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjguMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8g+/7EAAAACXBIWXMAAA9hAAAPYQGoP6dpAABH7klEQVR4nO3de3SU5b3+/2uSkCEYJiHBnCDBqAhFUBEkRgXr13xBSj0UPGxKleIBq9FCUb6IW6HSLbC11lMV7V5usFsrikWtbGxXBEQtkZOgHDSCG0zEHBTMBJScP78/+OXZjAlKyJA5PO/XWrOa3M89M/fdJ17rYo4eMzMBAAAg6sWEegEAAADoHBQ/AAAAl6D4AQAAuATFDwAAwCUofgAAAC5B8QMAAHAJih8AAIBLUPwAAABcguIHAADgEhQ/AAAAl6D4oUMWLVokj8fT5uWuu+4K9fKCpq6uTjNmzFBWVpYSEhKUl5enoqKiUC8LQJC4IcsOHDig2bNn65JLLlFKSoo8Ho8WLVp0xPkfffSRLrnkEiUmJiolJUXXXnutvvzyy85bMI6LuFAvANFhzpw5ys3NDRgbOHBgiFYTfL/85S/18ssva+rUqerbt68WLVqkn/zkJ1q1apUuuOCCUC8PQJBEc5Z99dVXmjNnjnJycnTmmWfqrbfeOuLczz//XCNGjFBSUpLmzp2rAwcO6Pe//722bNmidevWKT4+vvMWjqCi+CEoRo8eraFDhx7V3NraWsXHxysmJjIecF63bp0WL16sBx98UHfeeack6brrrtPAgQP1//7f/9OaNWtCvEIAwRLNWZaZmany8nJlZGRow4YNOuecc444d+7cufrmm2+0ceNG5eTkSJKGDRum//t//68WLVqkyZMnd9ayEWSR8deKiPXWW2/J4/Fo8eLFuueee9SrVy9169ZNNTU12rdvn+68804NGjRIiYmJ8vl8Gj16tD744IM2b+Oll17Sfffdp169eql79+668sor5ff7VVdXp6lTpyotLU2JiYmaNGmS6urqWq3lueee05AhQ5SQkKCUlBT9y7/8i8rKyn5wDy+//LJiY2MDgq5r16664YYbVFxcfFS3ASCyRUOWeb1eZWRkHNV+//rXv+qnP/2pU/okqaCgQKeddppeeumlo7oNhCce8UNQ+P1+ffXVVwFjPXv2dH7+3e9+p/j4eN15552qq6tTfHy8tm/frldffVVXXXWVcnNzVVlZqaeffloXXnihtm/frqysrIDbmzdvnhISEnTXXXdp586devzxx9WlSxfFxMTo66+/1m9/+1u99957WrRokXJzczVr1iznuvfff7/uvfdeXX311brxxhv15Zdf6vHHH9eIESO0adMmJScnH3FvmzZt0mmnnSafzxcwPmzYMEnS5s2blZ2dfaz/1wEII9GcZUdrz549qqqqavORz2HDhmn58uUdvg+EkAEdsHDhQpPU5sXMbNWqVSbJTj75ZPv2228DrltbW2tNTU0BY7t27TKv12tz5sxxxlpuY+DAgVZfX++Mjx8/3jwej40ePTrgNvLz861Pnz7O77t377bY2Fi7//77A+Zt2bLF4uLiWo1/1+mnn27/5//8n1bj27ZtM0n21FNPfe/1AYQ/N2TZ4davX2+SbOHChUc89uc//7nVsenTp5skq62tPer7QnjhqV4ExRNPPKGioqKAy+EmTpyohISEgDGv1+u8NqapqUl79+5VYmKi+vXrp/fff7/VfVx33XXq0qWL83teXp7MTNdff33AvLy8PJWVlamxsVGStHTpUjU3N+vqq6/WV1995VwyMjLUt29frVq16nv3dvDgQXm93lbjXbt2dY4DiA7RnGVHqyXTyL3oxFO9CIphw4Z
AAHAJih8AAIBLUPwAAABcIi7UCwAAAFGsqUl65x2pvFzKzJSGD5diY0O9Ktei+AEAgONj6VJpyhTp88//d6x3b+nRR6WxY0O3Lhdr11O98+bN0znnnKPu3bsrLS1NV1xxhUpKSgLm1NbWqrCwUKmpqUpMTNS4ceNUWVkZMKe0tFRjxoxRt27dlJaWpunTp6uxsbHjuwGADiDjgCBaulS68srA0idJe/YcGl+6NDTrcrl2Fb/Vq1ersLBQ7733noqKitTQ0KCRI0fqm2++ceb85je/0euvv64lS5Zo9erV+uKLLzT2sFbf1NSkMWPGqL6+XmvWrNGzzz6rRYsWadasWcHbFQAcAzIOCJKmpkOP9Jm1PtYyNnXqoXnoXNYBVVVVJslWr15tZmbV1dXWpUsXW7JkiTPno48+MklWXFxsZmbLly+3mJgYq6iocOYsWLDAfD6f1dXVtXk/tbW15vf7nUtZWZlJMr/f35HlA4gifr8/6LlAxgHHaNUqs0MV7/svq1aFeqURIZj51qF39fr9fklSSkqKJGnjxo1qaGhQQUGBM6d///7KyclRcXGxJKm4uFiDBg1Senq6M2fUqFGqqanRtm3b2ryfefPmKSkpyblkZ2d3ZNkAcFTIOOAYlZcHdx6C5piLX3Nzs6ZOnarzzz9fAwcOlCRVVFQoPj5eycnJAXPT09NVUVHhzDk8EFuOtxxry8yZM+X3+51LWVnZsS4bAI4KGQd0QGZmcOchaI75Xb2FhYXaunWr3n333WCup01er1der/e43w8AtCDjgA4YPvzQu3f37Gn7dX4ez6Hjw4d3/tpc7pge8bvtttu0bNkyrVq1Sr1793bGMzIyVF9fr+rq6oD5lZWVysjIcOZ89x1wLb+3zAGAUCLjgA6KjT30kS3SoZJ3uJbfH3mEz/MLgXYVPzPTbbfdpldeeUUrV65Ubm5uwPEhQ4aoS5cuWrFihTNWUlKi0tJS5efnS5Ly8/O1ZcsWVVVVOXOKiork8/k0YMCAjuwFADqEjAOCaOxY6eWXpV69Asd79z40zuf4hYTHrK3HYNt266236i9/+Ytee+019evXzxlPSkpSQkKCJOmWW27R8uXLtWjRIvl8Pt1+++2SpDVr1kg69FEHZ511lrKysvTAAw+ooqJC1157rW688UbNnTv3qNZRU1OjpKQk+f1++Xy+o94sgOgVjFwg44DjgG/u6LCgZkJ73gIsqc3LwoULnTkHDx60W2+91Xr06GHdunWzn/3sZ1ZeXh5wO7t377bRo0dbQkKC9ezZ0+644w5raGg46nUcj49tABDZgpELZByAcBTMTGjXI37hgn8NA/iuaMqFaNoLgI4LZiZ06HP8AAAAEDkofgAAAC5B8QMAAHAJih8AAIBLUPwAAABcguIHAADgEhQ/AAAAl6D4AQAAuATFDwAAwCUofgAAAC5B8QMAAHAJih8AAIBLUPwAAABcguIHAADgEhQ/AAAAl6D4AQAAuATFDwAAwCUofgAAAC5B8QMAAHAJih8AAIBLUPwAAABcguIHAADgEhQ/AAAAl6D4AQAAuATFDwAAwCUofgAAAC5B8QMAAHAJih8AAIBLUPwAAABcguIHAADgEhQ/AAAAl6D4AQAAuATFDwAAwCUofgAAAC5B8QMAAHAJih8AAIBLUPwAAABcokPFb/78+fJ4PJo6daozVltbq8LCQqWmpioxMVHjxo1TZWVlwPVKS0s1ZswYdevWTWlpaZo+fboaGxs7shQACCryDUA0Oubit379ej399NM644wzAsZ/85vf6PXXX9eSJUu0evVqffHFFxo7dqxzvKmpSWPGjFF9fb3WrFmjZ599VosWLdKsWbOOfRcAEETkG4CoZcdg//791rdvXysqKrILL7zQpkyZYmZm1dXV1qVLF1uyZIkz96OPPjJJVlxcbGZmy5cvt5iYGKuoqHDmLFiwwHw+n9XV1bV5f7W1teb3+51LWVmZSTK/338sywcQhfx+f1ByobPzzYyMA/D9gpVvZmbH9IhfYWGhxowZo4KCgoDxjRs3qqGhIWC8f//+ysnJUXFxsSSpuLhYgwYNUnp6ujNn1KhRqqmp0bZt29q8v3nz5ikpKcm5ZGdnH8uyAeAHdXa+SWQcgM7T7uK3ePFivf/++5o3b16rYxUVFYqPj1dycnLAeHp6uioqKpw5h4diy/GWY22ZOXOm/H6/cykrK2vvsgHgB4Ui3yQyDkDniWvP5LKyMk2ZMkVFRUXq2rXr8VpTK16vV16vt9PuD4D7hCrfJDIOQOdp1yN+GzduVFVVlc4++2zFxcUpLi5Oq1ev1mOPPaa4uDilp6ervr5e1dXVAderrKxURkaGJCkjI6PVu+Bafm+ZAwCdjXwD4AbtKn4XX3yxtmzZos2bNzuXoUOHasKECc7PXbp00YoVK5zrlJSUqLS0VPn5+ZKk/Px8bdmyRVVVVc6coqIi+Xw+DRgwIEjbAoD2Id8AuEG7nurt3r27Bg4cGDB2wgknKDU11Rm/4YYbNG3aNKWkpMjn8+n2229Xfn6+zj33XEnSyJEjNWDAAF177bV64IEHVFFRoXvuuUeFhYU81QEgZMg3AG7QruJ3NB5++GHFxMRo3Lhxqqur06hRo/Tkk086x2NjY7Vs2TLdcsstys/P1wknnKCJEydqzpw5wV4KAAQV+QYg0nnMzEK9iPaqqalRUlKS/H6/fD5fqJcDIAxEUy5E014AdFwwM4Hv6gUAAHAJih8AAIBLUPwAAABcguIHAADgEhQ/AAAAl6D4AQAAuATFDwAAwCUofgAAAC5B8QMAAHAJih8AAIBLUPwAAABcguIHAADgEhQ/AAAAl6D4AQAAuATFDwAAwCUofgAAAC5B8QMAAHAJih8AAIBLUPwAAABcguIHAADgEhQ/AAAAl6D4AQAAuATFDwAAwCUofgAAAC5B8QMAAHAJih8AAIBLUPwAAABcguIHAADgEhQ/AAAAl6D4AQAAuATFDwAAwCUofgAAAC5B8QMAAHAJih8AAIBLUPwAAABcIi7UCwDCRlOT9M47Unm5lJkpDR8uxcaGelUAAARNux/x27Nnj37xi18oNTVVCQkJGjRokDZs2OAcNzPNmjVLmZmZSkhIUEFBgXbs2BFwG/v27dOECRPk8/mUnJysG264QQcOHOj4boBjtXSpdNJJ0kUXST//+aH/PemkQ+NwDfINQLRrV/H7+uuvdf7556tLly564403tH37dj300EPq0aOHM+eBBx7QY489pqeeekpr167VCSecoFGjRqm2ttaZM2HCBG3btk1FRUVatmyZ3n77bU2ePDl4uwLaY+lS6corpc8/Dxzfs+fQOOXPFcg3AK5g7TBjxgy74IILjni8ubnZMjIy7MEHH3TGqqurzev12gsvvGBmZtu3bzdJtn79emfOG2+8YR6Px/bs2XNU6/D7/SbJ/H5/e5YPtNbYaNa7t5nU9sXjMcvOPjQPYa2juRAu+WZGxgEIFMxMaNcjfn/72980dOhQXXXVVUpLS9PgwYP1H//xH87xXbt2qaKiQgUFBc5YUlKS8vLyVF
xcLEkqLi5WcnKyhg4d6swpKChQTEyM1q5d2+b91tXVqaamJuACBMU777R+pO9wZlJZ2aF5iGqhyjeJjAPQedpV/P7nf/5HCxYsUN++ffWPf/xDt9xyi37961/r2WeflSRVVFRIktLT0wOul56e7hyrqKhQWlpawPG4uDilpKQ4c75r3rx5SkpKci7Z2dntWTZwZOXlwZ2HiBWqfJPIOACdp13Fr7m5WWeffbbmzp2rwYMHa/Lkybrpppv01FNPHa/1SZJmzpwpv9/vXMrKyo7r/cFFMjODOw8RK1T5JpFxADpPu4pfZmamBgwYEDD2ox/9SKWlpZKkjIwMSVJlZWXAnMrKSudYRkaGqqqqAo43NjZq3759zpzv8nq98vl8ARcgKIYPl3r3ljyeto97PFJ29qF5iGqhyjeJjAPQedpV/M4//3yVlJQEjH3yySfq06ePJCk3N1cZGRlasWKFc7ympkZr165Vfn6+JCk/P1/V1dXauHGjM2flypVqbm5WXl7eMW8EOCaxsdKjjx76+bvlr+X3Rx7h8/xcgHwD4ArteSfIunXrLC4uzu6//37bsWOHPf/889atWzd77rnnnDnz58+35ORke+211+zDDz+0yy+/3HJzc+3gwYPOnEsuucQGDx5sa9eutXfffdf69u1r48ePP+p18I43BN1f/9r63b3Z2YfGERE6mgvhkm/B2AuA6BLMTGhX8TMze/31123gwIHm9Xqtf//+9qc//SngeHNzs917772Wnp5uXq/XLr74YispKQmYs3fvXhs/frwlJiaaz+ezSZMm2f79+496DYQijovGRrNVq8z+8pdD/8tHuESUYORCOORbsPYCIHoEMxM8Zmahe7zx2NTU1CgpKUl+v5/XwgCQFF25EE17AdBxwcyEdn9lGwAAACITxQ8AAMAlKH4AAAAuQfEDAABwCYofAACAS1D8AAAAXILiBwAA4BIUPwAAAJeg+AEAALgExQ8AAMAlKH4AAAAuQfEDAABwCYofAACAS1D8AAAAXILiBwAA4BIUPwAAAJeg+AEAALgExQ8AAMAlKH4AAAAuQfEDAABwibhQL+BYmJkkqaamJsQrARAuWvKgJR8iGRkH4HDBzLeILH579+6VJGVnZ4d4JQDCzf79+5WUlBTqZXQIGQegLcHIt4gsfikpKZKk0tLSiA/4mpoaZWdnq6ysTD6fL9TLOWbsI/xEy16Odh9mpv379ysrK6sTV3d8REvGue1vMBJEy16iZR/S0e0lmPkWkcUvJubQSxOTkpIi/oS38Pl8UbEX9hF+omUvR7OPSC5Jh4u2jHPT32CkiJa9RMs+pB/eS7DyjTd3AAAAuATFDwAAwCUisvh5vV7Nnj1bXq831EvpsGjZC/sIP9Gyl2jZR3tEy57ZR/iJlr1Eyz6kzt+Lx6Lhsw8AAADwgyLyET8AAAC0H8UPAADAJSh+AAAALkHxAwAAcAmKHwAAgEtEZPF74okndNJJJ6lr167Ky8vTunXrQr0kx7x583TOOeeoe/fuSktL0xVXXKGSkpKAOT/+8Y/l8XgCLr/61a8C5pSWlmrMmDHq1q2b0tLSNH36dDU2NnbmVvTb3/621Tr79+/vHK+trVVhYaFSU1OVmJiocePGqbKyMuz2cdJJJ7Xah8fjUWFhoaTwPh9vv/22Lr30UmVlZcnj8ejVV18NOG5mmjVrljIzM5WQkKCCggLt2LEjYM6+ffs0YcIE+Xw+JScn64YbbtCBAwcC5nz44YcaPny4unbtquzsbD3wwAOdto+GhgbNmDFDgwYN0gknnKCsrCxdd911+uKLLwJuo63zOH/+/E7dR2cI53yToifjoiXfpMjNuGjJtx/aS9hlnEWYxYsXW3x8vP3nf/6nbdu2zW666SZLTk62ysrKUC/NzMxGjRplCxcutK1bt9rmzZvtJz/5ieXk5NiBAwecORdeeKHddNNNVl5e7lz8fr9zvLGx0QYOHGgFBQW2adMmW758ufXs2dNmzpzZqXuZPXu2nX766QHr/PLLL53jv/rVryw7O9tWrFhhGzZssHPPPdfOO++8sNtHVVVVwB6KiopMkq1atcrMwvt8LF++3P71X//Vli5dapLslVdeCTg+f/58S0pKsldffdU++OADu+yyyyw3N9cOHjzozLnkkkvszDPPtPfee8/eeecdO/XUU238+PHOcb/fb+np6TZhwgTbunWrvfDCC5aQkGBPP/10p+yjurraCgoK7MUXX7SPP/7YiouLbdiwYTZkyJCA2+jTp4/NmTMn4Dwd/t9VZ+zjeAv3fDOLnoyLlnwzi9yMi5Z8+6G9hFvGRVzxGzZsmBUWFjq/NzU1WVZWls2bNy+Eqzqyqqoqk2SrV692xi688EKbMmXKEa+zfPlyi4mJsYqKCmdswYIF5vP5rK6u7nguN8Ds2bPtzDPPbPNYdXW1denSxZYsWeKMffTRRybJiouLzSx89vFdU6ZMsVNOOcWam5vNLHLOx3fDpLm52TIyMuzBBx90xqqrq83r9doLL7xgZmbbt283SbZ+/XpnzhtvvGEej8f27NljZmZPPvmk9ejRI2AvM2bMsH79+nXKPtqybt06k2SfffaZM9anTx97+OGHj3idzt7H8RBp+WYWuRkXrflmFpkZFy351tZe2hLKjIuop3rr6+u1ceNGFRQUOGMxMTEqKChQcXFxCFd2ZH6/X5KUkpISMP7888+rZ8+eGjhwoGbOnKlvv/3WOVZcXKxBgwYpPT3dGRs1apRqamq0bdu2zln4/2/Hjh3KysrSySefrAkTJqi0tFSStHHjRjU0NASci/79+ysnJ8c5F+G0jxb19fV67rnndP3118vj8TjjkXI+Drdr1y5VVFQEnIOkpCTl5eUFnIPk5GQNHTrUmVNQUKCYmBitXbvWmTNixAjFx8c7c0aNGqWSkhJ9/fXXnbSbQH6/Xx6PR8nJyQHj8+fPV2pqqgYPHqwHH3ww4KmocNxHe0RivkmRnXHRlm9S9GRcNOebFNqMi+vw6jvRV199paampoA/TklKT0/Xxx9/HKJVHVlzc7OmTp2q888/XwMHDnTGf/7zn6tPnz7KysrShx9+qBkzZqikpERLly6VJFVUVLS5x5ZjnSUvL0+LFi1Sv379VF5ervvuu0/Dhw/X1q1bVVFRofj4+FZ/tOnp6c4aw2Ufh3v11VdVXV2tX/7yl85YpJyP72q577bWdvg5SEtLCzgeFxenlJSUgDm5ubmtbqPlWI8ePY7L+o+ktrZWM2bM0Pjx4+Xz+ZzxX//61zr77LOVkpKiNWvWaObMmSovL9cf/vAHZ63htI/2irR8kyI746Ix36ToybhozTcp9BkXUcUv0hQWFmrr1q169913A8YnT57s/Dxo0CBlZmbq4osv1qeffqpTTjmls5d5RKNHj3Z+PuOMM5SXl6c+ffropZdeUkJCQghXduyeeeYZjR49WllZWc5YpJwPN2hoaNDVV18tM9OCBQsCjk2bNs35+YwzzlB8fLxuvvlmzZs3Lyq+rzMSRXLGRWO+SWRcuAuHjIuop3p79uyp2NjYVu+sq
qysVEZGRohW1bbbbrtNy5Yt06pVq9S7d+/vnZuXlydJ2rlzpyQpIyOjzT22HAuV5ORknXbaadq5c6cyMjJUX1+v6urqgDmHn4tw28dnn32mN998UzfeeOP3zouU89Fy39/330NGRoaqqqoCjjc2Nmrfvn1hd55aAvGzzz5TUVFRwL+E25KXl6fGxkbt3r1bUvjs41hFUr5J0ZdxkZ5vUnRlXLTlmxQ+GRdRxS8+Pl5DhgzRihUrnLHm5matWLFC+fn5IVzZ/zIz3XbbbXrllVe0cuXKVg/LtmXz5s2SpMzMTElSfn6+tmzZEvAH3fJHMmDAgOOy7qNx4MABffrpp8rMzNSQIUPUpUuXgHNRUlKi0tJS51yE2z4WLlyotLQ0jRkz5nvnRcr5yM3NVUZGRsA5qKmp0dq1awPOQXV1tTZu3OjMWblypZqbm53wz8/P19tvv62GhgZnTlFRkfr169dpT4O0BOKOHTv05ptvKjU19Qevs3nzZsXExDhP9YTDPjoiEvJNit6Mi/R8k6Ir46Ip36Qwy7h2vRUkDCxevNi8Xq8tWrTItm/fbpMnT7bk5OSAdyOF0i233GJJSUn21ltvBbwl+9tvvzUzs507d9qcOXNsw4YNtmvXLnvttdfs5JNPthEjRji30fLW+pEjR9rmzZvt73//u5144omd/jEBd9xxh7311lu2a9cu++c//2kFBQXWs2dPq6qqMrNDH3eQk5NjK1eutA0bNlh+fr7l5+eH3T7MDr07Micnx2bMmBEwHu7nY//+/bZp0ybbtGmTSbI//OEPtmnTJuedYPPnz7fk5GR77bXX7MMPP7TLL7+8zY87GDx4sK1du9beffdd69u3b8DHHVRXV1t6erpde+21tnXrVlu8eLF169YtqB938H37qK+vt8suu8x69+5tmzdvDvjvpuXda2vWrLGHH37YNm/ebJ9++qk999xzduKJJ9p1113Xqfs43sI938yiJ+OiKd/MIjPjoiXffmgv4ZZxEVf8zMwef/xxy8nJsfj4eBs2bJi99957oV6SQ1Kbl4ULF5qZWWlpqY0YMcJSUlLM6/XaqaeeatOnTw/4TCUzs927d9vo0aMtISHBevbsaXfccYc1NDR06l6uueYay8zMtPj4eOvVq5ddc801tnPnTuf4wYMH7dZbb7UePXpYt27d7Gc/+5mVl5eH3T7MzP7xj3+YJCspKQkYD/fzsWrVqjb/niZOnGhmhz7y4N5777X09HTzer128cUXt9rj3r17bfz48ZaYmGg+n88mTZpk+/fvD5jzwQcf2AUXXGBer9d69epl8+fP77R97Nq164j/3bR8DtnGjRstLy/PkpKSrGvXrvajH/3I5s6da7W1tZ26j84QzvlmFj0ZF035ZhaZGRct+fZDewm3jPOYmR3944MAAACIVBH1Gj8AAAAcO4ofAACAS1D8AAAAXILiBwAA4BIUPwAAAJeg+AEAALgExQ8AAMAlKH4AAAAuQfEDAABwCYofAACAS1D8AAAAXOL/A7YON29ebq6AAAAAAElFTkSuQmCC", - "text/plain": [ - "

" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], + "outputs": [], "source": [ "import matplotlib.pyplot as plt\n", "# Create a list of frame numbers corresponding to each point\n", @@ -335,7 +371,7 @@ }, { "cell_type": "code", - "execution_count": 12, + "execution_count": null, "id": "09008ca9-6a87-494f-8b05-6370cae6a600", "metadata": {}, "outputs": [], @@ -354,18 +390,10 @@ }, { "cell_type": "code", - "execution_count": 13, + "execution_count": null, "id": "01467f8d-667c-4f41-b418-93132584c659", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Video saved to ./videos/queries.mp4\n" - ] - } - ], + "outputs": [], "source": [ "vis = Visualizer(\n", " save_dir='./videos',\n", @@ -382,24 +410,10 @@ }, { "cell_type": "code", - "execution_count": 14, + "execution_count": null, "id": "fe23d210-ed90-49f1-8311-b7e354c7a9f6", "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "" - ], - "text/plain": [ - "" - ] - }, - "execution_count": 14, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "show_video(\"./videos/queries.mp4\")" ] @@ -414,18 +428,10 @@ }, { "cell_type": "code", - "execution_count": 15, + "execution_count": null, "id": "b40775f2-6ab0-4bc6-9099-f903935657c9", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Video saved to ./videos/queries_backward.mp4\n" - ] - } - ], + "outputs": [], "source": [ "pred_tracks, pred_visibility = model(video, queries=queries[None], backward_tracking=True)\n", "vis.visualize(\n", @@ -437,24 +443,10 @@ }, { "cell_type": "code", - "execution_count": 16, + "execution_count": null, "id": "d3120f31-9365-4867-8c85-5638b0708edc", "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "" - ], - "text/plain": [ - "" - ] - }, - "execution_count": 16, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "show_video(\"./videos/queries_backward.mp4\")" ] @@ -485,7 +477,7 @@ }, { "cell_type": "code", - "execution_count": 17, + "execution_count": null, "id": "c880f3ca-cf42-4f64-9df6-a0e8de6561dc", "metadata": {}, "outputs": [], @@ -496,7 +488,7 @@ }, { "cell_type": "code", - "execution_count": 18, + "execution_count": null, "id": "3cd58820-7b23-469e-9b6d-5fa81257981f", "metadata": {}, "outputs": [], @@ -506,26 +498,10 @@ }, { "cell_type": "code", - "execution_count": 19, + "execution_count": null, "id": "25a85a1d-dce0-4e6b-9f7a-aaf31ade0600", "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "IMAGEIO FFMPEG_WRITER WARNING: input image is not divisible by macro_block_size=16, resizing from (1496, 920) to (1504, 928) to ensure video compatibility with most codecs and players. To prevent resizing, make your input image divisible by the macro_block_size or set the macro_block_size to 1 (risking incompatibility).\n", - "[swscaler @ 0x6363e40] Warning: data is not aligned! 
This can lead to a speed loss\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Video saved to ./videos/grid_query_20.mp4\n" - ] - } - ], + "outputs": [], "source": [ "vis = Visualizer(save_dir='./videos', pad_value=100)\n", "vis.visualize(\n", @@ -546,24 +522,10 @@ }, { "cell_type": "code", - "execution_count": 20, + "execution_count": null, "id": "f0b01d51-9222-472b-a714-188c38d83ad9", "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "" - ], - "text/plain": [ - "" - ] - }, - "execution_count": 20, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "show_video(\"./videos/grid_query_20.mp4\")" ] @@ -578,7 +540,7 @@ }, { "cell_type": "code", - "execution_count": 21, + "execution_count": null, "id": "506233dc-1fb3-4a3c-b9eb-5cbd5df49128", "metadata": {}, "outputs": [], @@ -597,26 +559,10 @@ }, { "cell_type": "code", - "execution_count": 22, + "execution_count": null, "id": "677cf34e-6c6a-49e3-a21b-f8a4f718f916", "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "IMAGEIO FFMPEG_WRITER WARNING: input image is not divisible by macro_block_size=16, resizing from (1496, 920) to (1504, 928) to ensure video compatibility with most codecs and players. To prevent resizing, make your input image divisible by the macro_block_size or set the macro_block_size to 1 (risking incompatibility).\n", - "[swscaler @ 0x5734e40] Warning: data is not aligned! This can lead to a speed loss\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Video saved to ./videos/grid_query_20_backward.mp4\n" - ] - } - ], + "outputs": [], "source": [ "pred_tracks, pred_visibility = model(video, grid_size=grid_size, grid_query_frame=grid_query_frame, backward_tracking=True)\n", "vis.visualize(\n", @@ -636,24 +582,10 @@ }, { "cell_type": "code", - "execution_count": 23, + "execution_count": null, "id": "c8d64ab0-7e92-4238-8e7d-178652fc409c", "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "" - ], - "text/plain": [ - "" - ] - }, - "execution_count": 23, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "show_video(\"./videos/grid_query_20_backward.mp4\")" ] @@ -677,7 +609,7 @@ }, { "cell_type": "code", - "execution_count": 24, + "execution_count": null, "id": "b759548d-1eda-473e-9c90-99e5d3197e20", "metadata": {}, "outputs": [], @@ -689,7 +621,7 @@ }, { "cell_type": "code", - "execution_count": 25, + "execution_count": null, "id": "14ae8a8b-fec7-40d1-b6f2-10e333b75db4", "metadata": {}, "outputs": [], @@ -708,57 +640,20 @@ }, { "cell_type": "code", - "execution_count": 26, + "execution_count": null, "id": "4d2efd4e-22df-4833-b9a0-a0763d59ee22", "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "" - ] - }, - "execution_count": 26, - "metadata": {}, - "output_type": "execute_result" - }, - { - "data": { - "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAigAAAFECAYAAAAN08U5AAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjguMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8g+/7EAAAACXBIWXMAAA9hAAAPYQGoP6dpAACOWElEQVR4nOz9eYxt2VnfD3/WWns4Q0237tjt7vaM7TYeEpvY9yWKfgqOHWSiIIxeElngICtRrMYKWELEEjiCDEZGCgkK4CiKAClxiPiDRDhyiOUEo8SNsUyQHPPagR/GbXf3HetWnWkPa3jeP9ba+1S1G+P2QN92r8/lcKvOuM+p697fep7v832UiAiZTCaTyWQydxH66T6ATCaTyWQymSeSBUomk8lkMpm7jixQMplMJpPJ3HVkgZLJZDKZTOauIwuUTCaTyWQydx1ZoGQymUwmk7nryAIlk8lkMpnMXUcWKJlMJpPJZO46skDJZDKZTCZz15EFSiaTyWQymbuOp1Wg/PzP/zzPe97zmEwmvO51r+N3f/d3n87DyWQymUwmc5fwtAmU//gf/yPvete7+Ef/6B/xe7/3e7zqVa/iTW96Ezdu3Hi6DimTyWQymcxdgnq6lgW+7nWv49u+7dv4V//qXwEQQuD+++/nne98J//wH/7Dp+OQMplMJpPJ3CUUT8eL9n3PJz/5Sd797neP12mtecMb3sDDDz/8Jffvuo6u68bvQwgcHR1x/vx5lFJ/LsecyWQymUzma0NEWC6X3HvvvWj95Zs4T4tAuXXrFt57Ll++fOb6y5cv85nPfOZL7v/e976Xn/zJn/zzOrxMJpPJZDLfQL7whS9w3333fdn7PCOmeN797ndzcnIyXh555JGn+5AymUwmk8l8lezu7v6Z93laKigXLlzAGMP169fPXH/9+nWuXLnyJfev65q6rv+8Di+TyWQymcw3kK/EnvG0VFCqquI1r3kNH/nIR8brQgh85CMf4erVq0/HIWUymUwmk7mLeFoqKADvete7eNvb3sZrX/ta/tJf+kv8i3/xL1iv1/zgD/7g03VImUwmk8lk7hKeNoHyfd/3fdy8eZP3vOc9XLt2jVe/+tX81//6X7/EOJvJZDKZTObZx9OWg/K1sFgs2N/ff7oPI5PJZDKZzFfByckJe3t7X/Y+z4gpnkwmk8lkMs8uskDJZDKZTCZz15EFSiaTyWQymbuOLFAymUwmk8ncdWSBkslkMplM5q4jC5RMJpPJZDJ3HVmgZDKZTCaTuevIAiWTyWQymcxdRxYomUwmk8lk7jqyQMlkMplMJnPXkQVKJpPJZDKZu44sUDKZTCaTydx1ZIGSyWQymUzmriMLlEwmk8lkMncdWaBkMplMJpO568gCJZPJZDKZzF1HFiiZTCaTyWTuOrJAyWQymUwmc9eRBUomk8lkMpm7jixQMplMJpPJ3HVkgZLJZDKZTOauIwuUTCaTyWQydx1ZoGQymUwmk7nryAIlk8lkMpnMXUcWKJlMJpPJZO46skDJZDKZTCZz15EFSiaTyWQymbuOpyxQfvu3f5u/8Tf+Bvfeey9KKf7Tf/pPZ24XEd7znvdwzz33MJ1OecMb3sAf/uEfnrnP0dERb33rW9nb2+Pg4IC3v/3trFarr+mNZDKZTCaT+ebhKQuU9XrNq171Kn7+53/+SW9/3/vex8/93M/x/ve/n49//OPM53Pe9KY30bbteJ+3vvWtfPrTn+bDH/4wH/zgB/nt3/5t/t7f+3tf/bvIZDKZTCbzzYV8DQDy67/+6+P3IQS5cuWK/MzP/Mx43fHxsdR1Lf/hP/wHERH5gz/4AwHkE5/4xHifD33oQ6KUkkcfffQret2TkxMB8iVf8iVf8iVf8uUZeDk5Ofkzz/VfVw/K5z73Oa5du8Yb3vCG8br9/X1e97rX8fDDDwPw8MMPc3BwwGtf+9rxPm94wxvQWvPxj3/8SZ+36zoWi8WZSyaTyWQymW9evq4C5dq1awBcvnz5zPWXL18eb7t27RqXLl06c3tRFBweHo73eSLvfe972d/fHy/333//1/OwM5lMJpPJ3GU8I6Z43v3ud3NycjJevvCFLzzdh5TJZDKZTOYbyNdVoFy5cgWA69evn7n++vXr421Xrlzhxo0bZ253znF0dDTe54nUdc3e3t6ZSyaTyWQymW9evq4C5fnPfz5XrlzhIx/5yHjdYrHg4x//OFevXgXg6tWrHB8f88lPfnK8z3//7/+dEAKve93rvp6Hk8lkMplM5hlK8VQfsFqt+KM/+qPx+8997nP8/u//PoeHhzzwwAP88A//MP/kn/wTXvziF/P85z+fn/iJn+Dee+/lu7/7uwF42ctexl//63+dv/t3/y7vf//7sdbyQz/0Q/ytv/W3uPfee79ubyyTyWQymcwzmK9wonjkf/yP//GkI0Nve9vbRCSOGv/ET/yEXL58Weq6lu/4ju+Qz372s2ee4/bt2/K3//bflp2dHdnb25Mf/MEflOVy+RUfQx4zzpd8yZd8yZd8eeZevpIxYyUiwjOMxWLB/v7+030YmUwmk8lkvgpOTk7+TD/pM2KKJ5PJZDKZzLOLLFAymUwmk8ncdTxlk2wmk/nyKAClnvz68Rt15vsv6bOKfOl1CM+8hmwmk8l8dWSBksn8GQyCQwFKKZRWGG0wRmOKAq0UIkJRFEyqimk9YVpPqKsKYwyFMVRlxayumZY1hVLMy5qyLFFViVKK4BwiAQDrHcfLBcv1ik3X0PYdXoS272n7nt46eufw3iMIznm89/gQEBEkRHEz2MuegTazTCaTyQIlk/nTGIVJ/AKtFEprCmMoyoK6LKnrmqosKYxhZzrlYHePw509zu8esDffYWc6Yz6Zsjufc7i7z7ys0b2DzqGVRuoCYwzKOggeELwEls2a42bFottw3Ky5s1lxZ7XkznLJyWbDYrOm6Tp652j7jq63OOdw3hN8IIhEYZIqMacFSyaTyTwTyAIlk3kCp4WJQj2hamKoqpLZZMLOfM7ezg4HO7uc393jysF5Lu+d4+JsjwvzA3aqKaUx1EXF7mxOEQRtA0E6lDgKZZBgEAHtLC648bV8PaWdHNJooRXPyluWtuHG4g6P37nN48e3OVotWDQbFpsNi2ZD07Z0fY+1LlZVgicEIYQAuZqSyWSeYWSBksmcQik1XrRSaK1jK8cYqrJkUtfs7e5w8eCQB85f5vn33sd95y5yYTLnUjnnoJoy1SWFKlBBoX1gWkypqXC2R/nAat3SbToKbdg/OKCcVCybE5rGolAUZcFsNkc0bLqeXgmhLAgzRbNzkaPDFbc2S466NTebJY8vjrmxOOb2YsHxcsG6bdi0HU3b0lsbBYv3hBAQCcTCShYqmUzm7iYLlEyGWDVRWqGUxmidhImhKgsmdc10MmF3NuP8/gEvuOc+vuXeB3jhpfu4Z+88B+WUuYO5U5QelBe8czjnkM7ijxtWQfDOUmiDW29Y3jlGKaE/OUHXJatmRdu3gFAUJbu7OxRFQT2Zcri7x8Y77NoyVQWHxTlecHiO1ggL6bnZb7jZrLixOO
HGyRHHmzW3lsdcO7rNncWCxXrNpm2x1uG9w/tAkPAlXpVMJpO5m8gCJfOsZ6iYGKMpTEFZFJRlwXQyYW93h8P9A66cv8hzDi/wwPnLvPjyfdxzcIFL5y4yrybIpsU0lqmDsOlZL07oV0u87fGdZbPesG4amqZhd3eXSVXz2GOPEIJHFxqH0IeeIAGtFUVRUBQl08mUSxcvISKcrJYoU1CXFXVZMasqDqYVF+s5904PWMwtq/2O1ZWWxluOuhWfu3mdR25d57GjWzx+5zaLZk2bKitd32NdFCsSomclTwllMpm7iSxQMs9aBq+J1orCFFRVOVZLdmZzLl04z/333MuL7nsuL7rvuVzZO8fedMbOdMbOdMJeNaVSBjWvxgBnc7wmHN9kc3JEu1nTNBtOVitO1is2bYO5XQCKtm1ihaau6J2j9xbnPQKURRGFkim4fec2e9cep57P2NnZhZ09vPes1yvUbainNUVdc1iVXN45h5sYOtvSzzzfOr+XoytrHlsc8SdH17m2POLa8pjH7tzi1uIkGm3blr63+NQCGiaBMplM5ukmC5TMs5LTPhNTGCZVzc7OnP2dXfb39rhy8RIPfsuLePDFL+F5l+/j4v45diYTppMJZVVQaDBNi7QWXRaowsSJGdtQ1xB8Q98sWC+X3L5zxKJtWNsOFwLOx2mduq6pXMVms6GzPV4CKIUxBdPpnGlZ0dseB0x9j/WesqpZdj3dpqEyBft+n31TMqkNla5wXjE1M1CKfTVlX0+5UO/xvP1LrGzDFxe3+KNbj/PI8U2uLe/w+NFtjheL1AKyZ6oqWaZkMpmnkyxQMs86RgNsyiiZ1BW7u7tcOjzPvZev8MLnPZ+Xfcu38OLnPY97L1xib7ZLXRUUZayyaAPKdogL0FvE9xA04iz98hZIgw8tTbdgtTmhty1t17BqG6x3WOdAQG/WiAhd2xEUaKNRWqOUpmk7ZvWE+WxOJ4FV13B8suDGjVsxU6Wq2Z3N2dvdoywLvPPcfPxxwLAzmzOrJxRKU6iaaVFwYCZ0peVysccD0ws8eu4WX1jc5rO7j/HInZvcPD5isVmzXm9oux7nHSFIrqZkMpmnjSxQMs8qVJrMKYqCOrV0duc73Hf5Mi99/ot41Uu/lZe+4IVcuXwPB/v7TCcVVVmiCkEViqA9QRyEhgKL1g7fx+kc3Ttsc8zJ6haL9Q0Wq9ss1kva3tL1DZt2Q58ySsIQqjZ8rUCLRpsCRPDB48Vjg6doNpRFRVlUFKpkb3cXMxMOdvYBxWq14WRxTFVPmM/3ENvTWI/WGowwqSqqsmZSTNid7HB+vs+9Owe8aP8SLzl3mT9c3uCPjh7nkTs3+eKNG9xZLNg0saLiT2eqZDKZzJ8jWaBknhUoBUrF6ZyqLJhOp+zM5xzs7PLcy/fwmpe9gle/+OW88L7nc/7gHNPJlEKVKAeFFsyshInC0SOtQ3kLriXYBt9uCLbDr9asTm5wvLjGneUNFpsT2r6ltx7ne7w4OmcJXkZxktJJEKVQStApe0WCx9seK0JpCirnmVRCXWnarmOnmtJ2HY8++hjTyYSiMhyeP8+5/T2C9bRNj1aKojbUZYnZ3ScYhe0tdd8xKQx79YTz0x2eMz/gBXuH/PHFW/z/9h/lj68/zuO3brFYrWm7DnsqUyWTyWT+vMgCJfNNz1g1MYaiKJjPppw7d477rtzD86/cy2uf9yCveO6LubBznv1izq6aUEqBbx3Wd3gTKKSmMjO08jH1te+h2aCaBtWuaJbHHN+8iT1ZslrdZr1Z0DQrus5j3VAtCThn8V5AQDi1b0cptIpeW60NCiF4h1cCWmHEIxJQOm7wsd6xaTZcuXSF2XxGbzuMMWzWa9pNixJNPanRuqQwBeI9YgPKOoz3KKWZTOfMyop9XXFxMueFexd4/u4FPnXuAp969E/4/I3rHJ0sWK03dH2fslRyNSWTyfz5kAVK5puWmG0Sc02KoqCuKyZVzaXD83zLA8/nL7zo5bz08nN53v4VLhS77KopE19QND2y2dCslxSlwoeW5Z+s2D/cpd4p8b6lWZ9gNwtM3+H7juXimJPbN1jcOuLkzi3WtmUTHB0BKwEvghAI4vBIrJKgCKgoSlBIAIUgEidq0ApjQJQgWlBGo2IpiGA0elJDXdGFwN7+PnU14fbNm/RNS1lUeG/xoad3PbPdXYqiRHtP33XYrqWoKqrSoLXB6JL9yTkOzIRLZsa95S7/Z+eQz17/Il+8fYvj5ZKm7WKWyqn2VCaTyXyjyAIl803LIE7KsmBax2j6/Z1dHnz+i/j2l/4FXn75hdw7PWS/nDNXBaZxhGaB1YJzLZvlMXv7M5RvaK5dp/m/LfPdCdTQSoP1DeJbnLN0bct6dcLtO7dYN2taPJ0ROhWwKtCKwyuPGEGCR9AIGp+OMww7fyQQtYmKqbKmBCVoo9GFjqPRWiNa47Xi5skxe7u7zIGbt24RrAWErm+wrqPpNWVTsl6doLU+E93vug1oDUrRe4cpDHWA56gdyv172ZvMOJjPmM8mfP76dW4dn7BpYjqt94Ew5qdkMpnM158sUDLflKiUbzIEru3v7vCcC1d4wZX7uPqSV/HyKy/kOZPznDMz6Cx+sca2G2zfUVUGUwjra49SnttBK0e53LC4c4MT26BMQE80tvLYwuJVHB1u2obG9/QEOiO0hdAW8bZeOXocGGJ7xwWCAkGjDIgaPB4KDShRKFWMFROI3pSqrplMJ5RVFQVCCOzu7rKzs4NtWu4sljSbFYr43kspCeIJ3qONQeu4edkYQ1lWlMZQlCXKKZqmxSNUAS6ommp2yMV6wvmq5pNFxWeLgpvHJ2w2zRj0hs8iJZPJfGPIAiXzTYdSKlVOSuazKRfPneO5V57Da1/4cl566bl8y8FzuKj32bUlRevoVyuaxR2axQltu6IsFNN5xfr2LbqT2ygTcL6n79ZsmgXLxR1UpdC7BlsLalIiCnrX443GFYIzYbxYHbAaHAHRCozGBk/QUNQVz3/B8zk8POSxxx5js2m4c+sWKghapd05bCso09mEc+cOQKKpdj6bYm3PH//J5+g3Lb7t0BIwRYHzjt71GKOZTCbs7O4SfGCxXCIhUJQls9mMvYMDTFGglUKCoJWmcp59rzlf7bN/saA2JaYq+aPqGrePj1mu1mzajr63kA20mUzmG0AWKJlvKobI+rIs2ZnNuPfSBV7+vBfybS/6Vl558QVcYM5FtcuOnqFtwDcbNneOWN65zeLODXrb4UOPNoGu77DS0fkW53vKWlPUmpO+oe866AWZaqrdOZQG0cT2kAl4EwgqXrzyeDyiAqLi7aEqOX/5Cq987Wu4eOk8N2/e4IX3HnDxwiUe//wX+NQnPklzskYHT60Vk+mUyXTK3sE+8905d24fY3TBnTu3uXnzOtOqxiiNOIdGcCHQhIBSaZOxAnPdoLTGWhvj/KuSTbPhzskxVV0znU4x2iBAoQ0lBr9u2RfhJZNzuCvC3mTOH8+u8djt2xydnCQDrcXj8zhyJpP5upIFSuabhihO4oK/+WzGPRcv8qoXv
YS//JJX86rLL+SinTG1sBsMyjlwlr5ZsFjdYL05wtoV1nYEPMH1CIJzDb1dozRsekfnHG1occS2iW8Ck1KhpcLrgPcWS8CrZG5VMqTgI0DQilCUPP8VL+O7/r/fx/Xbt/mvv/lfWCwWGG3Yme9SK03YnWKC4DpPIGalzGYzlNZcu3GD9cmKnfkOCs3ly5cpTcHRzVtgHYVSoMB7jyIAMk7fGGMAsM5Cs6HtOrQx1JMJ1lpm02nMTwG0Mdiuo2uW7JjAt073OX9hyuF8zh/O5vxx+TjX9RHL1ZqutyncLS8fzGQyXx+yQMl8U3BanMymUy4dXuAVL3wJf/lbXsXL51c4vzFMXUD3Fqs6gsTtwqvlCd1igeu6FKAmOAn0ziLa09kO5z3KBhpp6XxLMIKqNT4EenFge1QhWB/wfU/oLd55rIAX8IBXgDIEESa7e+xeucznb9/g4U98gv/9f/8vbd9ifaAqSqRzXK7m3FvvUyhNEDh/8QKmLPjCF7+A7SwGg9GGwpQ89uijKBHmkyk7sxlKPJvNBmtttK9oRfCeIB58WoyoFdpoULHttGnXbLo10+mU3Z2d6DEJnihVHN4FzMpysS7Qk0PKC3HzswAIKLWh68E6P4bQZTKZzNdCFiiZZzyj56QomE2mnNvb58X3PZfXveBbeen8Xi72FVMfEN/ifc/GtqQgEmyzol0vWaxOCEojKtDbntVmRecbXOiBOFrrfI/zLb7wEApcBb4E8RYlCisB5x3iXNxrEzw2BDwSx4m1RhmNrkr+3899js8++kUWmw1VPcFrhe86emvRXmj6nrW07OqKup6gjeHO8TFt26GDwihN13b02jGtai5fusAD9z6Hdrnh2mNfwDlLVZWgFU4cQTx9H8eERYSqii2eQgq0BJquoe8tciJMpxOU1sx25sx2doCSwhja1Qq32jAtDRfE8MDeIW3fYa1F0s8BOqwbKjZP6z+LTCbzDCcLlMwzmjML/+qK/b1dXnDfA7z2+S/lweklDm1B0Xi0KaiqinbTcufOEc5ZtDH0XcvR8oTbd26jjMEUhhAci9WSxjUE7SFYBEGZ2K7xIeD6nqAUog3i41SOxxN8NLVaCXTe4UIgoHBaUjALnCwW3Lh9jVaDFdIWYY8KQiGKwmgmVUWhY0YJCNevXwcHWhUUuqQsS7TRGG2o6woR4fq162xOlmitOX94SAA2XcviZEnTrrGux1mH1pq2B6UVx8sFQcXgN6XihI8cC0prprMp5y9exDqLCFy+cplpscNitaJctFyoFf3+IdY7AgDbCH8rIBKexn8ZmUzmmU4WKJlnNCqJk7qs2J3v8NzL9/JtL3mQv3jhudxjS6aNQ3cWV1o2a8vJyW1u37rBet3QNBusc7TNitV6hZMAGpQSOtvglUvjvxaFUNZxJ48ntW7G/o0g1sfUVxSeWE3pxcfqiYBHIUYI4lFKsTetmWhF4z1t5+iDoMRTmpJZWXEw28NsAuI9fd9jTYUOGlMUVGXJZDJBpxaPUprlcsnSepQXqlJzs9mwaVo2fYsNDhcsQTziJVU7AoGYUmuDx4eAUhoQiqJAac3GdtxaxPyUyaRm07Xs7e+hlWYiiumq4/KspDs4pE9Jt7GVFP0uLnlfMplM5qshC5TMMxatY2unKkt2d+bcf+kKr3rui3j5/j3cY0tmm4B0PW2/ofMty3bFYnlC13U0tqexLcvlkvVqQdu1BAKq0OhSE0IUJ6hoMhUE33doASljKUQrDaagt46gNOi4Q8d5jxMhKHBKxhYPGlzwuCDUZcVsMmWCcORXBB+wwRGCpSgnhN7heoehoO96XOmYFhPqumYynTKf7VBVFaAxQNc2oBRFYUB8bP/0XfSIGBU9N6m64ZyLqbYaRClEg4SA85YQPFppTFmgjUHSVJQXT9f3LJYLJtMp1aRmZgrsumNuLJdnc/R5QUMUKCEKMxGf/SiZTOarIguUzDOO04v/imSKvXL+Aq9+wYt5zeXn8ZLZJS64in69Yt1t6GyDLgRUnKVROoa4BQn44PAaqAoInj44ihAwxqALjSk0Ig7vXfKghJToqjDaoJVBxMc8EKWQ4AghhtkHpXASBYqoKHNiRSWgiWmxtdJMTIlTHh8E5QV8oHctyikM4IrkG6lrJpMpZVlijGFnPmdv7wDX9dy4cQ2tDLU2aE00DE8mtMERtODEslgcs1k1BAkEia8XzbsKCQHv4ut4FWJFxWh0YYAiTiGFKNRs8JTBUdU1xmgOxCAIO+fPM03tJhEIEtJm5myazWQyT50sUDLPOKI4URTGMKlrzu3t8dJ7n8urLz2P59aHXDBz+uNj1usFXTLE7sx3UFpYrpbMpjPKcsJisQQ09WTCzqSKo8RdQ9OsscGDE4wSVBBCgKAMFDpuHkYjovA+oEuNERN37gQhePCi8KJiiyNtLQ4iBATrHUYCeI8ARsCIUEgULdr7mF0SFEobRBSFqZlO5uzM9ziY73HPxUvszndYnCzp2pbalEhwKAK2t6Bgd3eHndKw7hpW7QpTFoiB3ntCcHiJx+JFiPPBAHGxoi6IEzxaY51HB4GyAB8ojLBarVCbDfXOlGldYrRhKorZ4UWcD+mSlgsiefw4k8k8ZfRTufN73/tevu3bvo3d3V0uXbrEd3/3d/PZz372zH3atuWhhx7i/Pnz7Ozs8Ja3vCUa/E7xyCOP8OY3v5nZbMalS5f40R/9UZxzX/u7yXzTo9QwJmtiUuxkyr0HF3jZuXt5rt7jQGr8sqXfNDjbIqGn7zY8/thjPPboYxzfOeHo6JjbR3fwXuLaPm2wwdMHPya9ehE671g1Dcv1huWmZd12dM7jAvgA1lr6vsM7H5f9Cbig6H2gtY62s3jvkXRydqmC4iWN/KpAsB1KHIWEKFIA7QPKxzWCSikm9ZTZfE49mTKpJly5fA9GGU6OFyBCXVQUpkitnQ3r1YKjO7f44uNf4POf/2MeeeRPePSxxzg6PqZxHV4JoRCCiW0oH3x6/47eWdq+T7kmAecF6zzWeXoX6HsXtZP3eOfwvaNShnPVjFkXmDSWe/bPcengHLvTKVVZYLRJEz6ZTCbzlfOUBMpHP/pRHnroIX7nd36HD3/4w1hreeMb38h6vR7v8yM/8iP8xm/8Br/2a7/GRz/6UR577DG+53u+Z7zde8+b3/xm+r7nYx/7GL/yK7/CL//yL/Oe97zn6/euMt+UxG6EGrcTTycTzu/t88ILV3jR7kXumR1wcbbPTlGhg2CI7SBre9brFV0XR2I3mw1t26KVYjKZUFYlCoWzjt5aXKpsxKA2R99b+r6n63uatmW92bBar+NOmq6PHhHr8c7jvcdaT9v1cfOvD3iJLROfRo5FxU3Gznt88ATv8KlqopWOE9BpAkZrTVmV1FVNWRSICCfHx0ymE170whdyzz33UFVV9LDYPoalITEsToMyGlPEFkwgtlviH0FpqOoKUxSQhB/DtmQReu/obdy5Y52lbVvaNvp2hs+y3TQsjhdoYK+smQfYE7jv/HkunDtgPp1QFgVaqyxSMpnMU0LJ11B3vXnzJpcuXeKj
H/0of+Wv/BVOTk64ePEiH/jAB/je7/1eAD7zmc/wspe9jIcffpjXv/71fOhDH+K7vuu7eOyxx7h8+TIA73//+/mxH/sxbt68mYx/Z+m6jq7rxu8XiwX333//V3vYmWcoY4x9UTKpaw7393npvffzhhe+iv/P4fN5zs55VO/pVyva1YKm3eDE47yj6zoWmzU2beItyioKFdfjlaf3luVmSWd7rO/iIj4VaNsG3/Z48TjlkCKAAmVAlQpTGfSkQtcVwUAnLokZG82oLkTDKGmSR8B7R1VVFGWJdY6u8zRtj7eeqqgolaYIMJOS3XrOhXOXue/yvexOd5mVc55z+R7uv+deplXNnTt3ePyxxzi5c4y3Dft7c6bzGXfWC6yP4qfpO24tjnGpQtK0LV2/AaWoJzO88zRNE6uYEpcSDsm36YOPrSetk5iBoiookhemqEtQir39XZyGE+O5hefxbsMfPfY4N+8cs26aaCbOfpRMJgOcnJywt7f3Ze/zlCooT/YCAIeHhwB88pOfxFrLG97whvE+L33pS3nggQd4+OGHAXj44Yd5xSteMYoTgDe96U0sFgs+/elPP+nrvPe972V/f3+8ZHHy7GOonmitKQpDVZWc293lRRfv4UW7FzinKuy6Y71c0LdrtAal42OqqqKeTKjKknoyYTKN0zBDhkpVVtR1DcRWjNGGqqopTIHRRdovPASRqXgCF4V4hesD7aZjs9rQbjps6wgOJDCm0lo8FoeTgJdooO1SC8jaWHEJPmaPqEIjpUEKHbcdp+OPLRzB2p7r16/x2c9+lj/8oz/kxo0bKK3Z3dvFVCWL1YovPvoot28fcfPmTR679hi379ym6zvQmp29XeY7c0xZxePoWrph2kcpREEYI/rTPiE8Tjw2uLRTSGKlqO9x3lNVNdpoVoslbtMwsYFdLxyYgksH++zvzJnWNYUx6FxFyWQyXyFftUk2hMAP//AP8+3f/u1867d+KwDXrl2jqioODg7O3Pfy5ctcu3ZtvM9pcTLcPtz2ZLz73e/mXe961/h9rqA8C1FqNMZWVcV8OuXecxf4lgv3cN/OOcpeI85jlIrjvSFGroPElo3EfA/btXHMuFlgrY2x9l1P41oUUNcVve1ouw4bbPyNn7O/8Zsijt+GNH6MKGKKfMxCcT62erwPhCCxxYMgolCSBA4K52MLyKX7mcKgjUltnjj5Y4yhLAq89zS+pfM9LQ2+sxTacPHiBeqq5ub16xwdWZrNiq5pafuW1lmCCqjKEIyibzfEzT5QliXOOZq2xbvwpO2XOHk0fpeMviHeEEIcXe5aUPtM6ppmtaJvHUpV7MxqevF0Ozv4NMnjvCdIrKCEXEXJZDJ/Bl+1QHnooYf4P//n//A//+f//Hoez5NS1/X4G27m2cdYPVHJe1LXHO7u87zzl3lgfp65qiiNQRlD23laH3DOMdvbRYD1Zo31TTTYAt5ZEIdWAeUdeI8h7ssRkVilMRqtS5zWMSOk99EcGhTWBmQYzxUB4k6aoMArwXsZRUeQlAeCIkhsnWgFWmmCD3gveAHRKl6UImggxOt0YUBB0zYoF4PhalNSaTg5Aec7ppMJJ8d32DRrFDCbz6gmFWXfsekbrHd0bU9re5aLJdPplCAuiqAxpC3u1jmtU2T89Ld/xeC1AFoz1JOOj49juBtpm7GACsLOvMJqTXXpCk1nado+7eoRVG71ZDKZP4OvSqD80A/9EB/84Af57d/+be67777x+itXrtD3PcfHx2eqKNevX+fKlSvjfX73d3/3zPMNUz7DfTKZMwyTO8ZQlSWz6ZSLO/s8b3LAJTWnpuTcuUN6a1l1DbPLl5hPSiaFxvY91fEJd46OsL1lUpUUezuE4DGmoGkaFqslHRVr36K0wgdP3xeEIPTBoQtD6UuCCDY4WtvhXIy/F4FYH9EEIBDwksaSBVwSJ16pOMEDaFGYEJBAbPGIQukU5KbBqNieEg1WHOt2jalBewguoIKjsRoaB0Vgb3/GhUuHrJslm/UKlKKcTtBVSVgHumWH7Tu6tsEFR9usQceKjdIxwj+IoNUZSRIrQ+PPQEdRRkhG2hiUJ1ph0wSe1vEz0D6g+4AuFFMUQSsOd/e4fXJC03VxsklC3tWTyWS+LE9JoIgI73znO/n1X/91fuu3fovnP//5Z25/zWteQ1mWfOQjH+Etb3kLAJ/97Gd55JFHuHr1KgBXr17ln/7Tf8qNGze4dOkSAB/+8IfZ29vjwQcf/Hq8p8w3EcNY8Zh7UlWcm+1w/9457p/ss19OqYuKvosppzt7e1y4/1669TFH1x8nOIdKEyTaxD07ZWmoyglKGZRIDG3TikuFonM9XdciKk7AtK5n1bWsNyuavqX3jmpS09metmvoXTTDhqDwRvCa2M5J1YT4J7Z0hpZJjEYRSGmrQQIKgyK1VVJ1Bq1xwdN2DWVQYAUlwmS+CxJS4FpgtV4xqWqm0wk3b96IibLG0PUdzvcIQlmWlN7iehtzbYNE70sKvVMpTI5UAQGQVBmK38TpoyBCkSaLfPDjbXE3kKF3MbAOEbp1Q1CgJXBuOmV/Z4d102CdxQeNUrmKkslk/nSekkB56KGH+MAHPsB//s//md3d3dEzsr+/z3Q6ZX9/n7e//e28613v4vDwkL29Pd75zndy9epVXv/61wPwxje+kQcffJDv//7v533vex/Xrl3jx3/8x3nooYdyGyfzpChiS8QYw6yecGX3gBcfXOa+2TkOpjuEzrLuNmitqLViee0at2/dYLNc4LxFa81sOsMozXJxh75ztOsNSmnO7e+zt7+LMpqmbTk6PmJjYsw7RtGJZW6nrCc1x5sV666ht11KSPW44MFvE2M9sdUTBAIqbkiGswIFootWwCsVqw4qXh8rGSpeISDexzHp3iO9p1CGSilKoyjrgrZvWD++GEeAgwS887Q2CqTe9myaDUHHKokxJh6P+PHzjT4b0KTXFEkjxzE9FogCRQSlhuWGAe8DogSTxp+NMYizEAI+veeqKAjeMxXDlcPzLNdrur6Pj5VtiF0mk8k8kackUH7xF38RgP/n//l/zlz/S7/0S/ydv/N3APjZn/1ZtNa85S1voes63vSmN/ELv/AL432NMXzwgx/kHe94B1evXmU+n/O2t72Nn/qpn/ra3knmm5Lt9I6iLAt2ZzPu2z/PC/YvcmG+T2VKLC7mbAShP1nQe0vftPS9ZTadcrC/j7MW3/YU2lDNZ2ilmE6m7O3vUZYF6/UG27XoEKiKMmWHBFwfqJQiVDVeAkVR0LuKsiyhiYFu3hLD104N5wZCyjtJgkUnkZK2Fw+LfoeqRJBYHSEElDGxmuEFRYDgo3AI8TXatmFvdydWhrSiDw6vYvtJQojTN+nDczhssCn/JAazkSo73scKyjDMZwoVJ4nS5JALQtt2qcohqNQCCiFO8CilCFqjfBxTts4RZ5TSz6wsMRpKEYrecpBSf9dtk7woAZ+qLZlMJvNEvqYclKeLxWLB/v7+030YmW8wClBaY7SmrioOD/Z52XOfx1993sv59ntezAsPnsNMCrrlEul
7xAecdzR9z/F6GVsbRuNsnHgptEawzOc1VVnF/TrW0XYNTdPE0DQJbLoumWUFK/Hk7wRaZ+mDx3nLYrNi2W5YrNdsupbG96xsy8b3OKPwyhMIWEUaF45+FOtj5UGFWLHwPtB5Fz0dxmAKw6SsmBvDrId9KdgpJ5RBQy9My4r5dMaFixepqwprLdZZpvM5zXrDcrFIHo8oQpo+jhF7STNHyQUbQgxf8xJQygCKoijG8WtjDMoUNG0XP7+qTIJLQGm0ikIm7jUyGK3jckEdjchaxxUCpihwGpZYlrXhSAJ/+PhjXD+6w6ZtcG6YtspkMs8mvpIclLyLJ3P3orYVlMIYZpMpF3YPuHfvkHPTXSZVRdj0BO8JKZcjiETPRmG4cuUy6/WSk5NjtIKyMNSTCUURl+PFceOGrm3x3iX7RVzWR5rOqYoKtKasanb296CMbZWbt29xZ3HCyXTF7cUxi3YTKx+dogkuVkQUjBUVpaJgCYEQAko0hliBCQJKFEHF6RYXPCEtIox/NKUyKBO3IJdVie27UQg0m4bjO8eE4Ak+0NvYQgmSNiT7WHkJg8+EWAXxwUfBouIeRZWSdVGKtm0hSDSzEiufKJXET/yclNq2hGT7IxsJ3kcRI4qp0VgnnN+ZsTh/yKppsNYSfKo0PfN+T8pkMt9gskDJ3MWocYKnKAp26gmX610uTfaZmRLXbFDWow1Y8XS2o+1a2r5nMp2wWS9YLxcURmG0oiw1k0mNhMC1G4+xXCySWBC0TiFiSqHTkj6NxgQVhUIw+EUbT6QqcH6yx+X9C5w0az5/7VFurRbU3Qq9LtBdg3YdbXAoncLORHAhxBwSrdACPsTKTGyxxNZIAByCVVAUFTMqJrqgCgoKIQRL0wS6dhMrLsZEk64TkDAGrPkhw8XJuIUYnRb3iRDweJEYOxcEhSY4YbXcoMsCEehdF5NjtcJaR1UU6OShFYbIOpKxNnlnktBRKMT7KL6MplAFM6Owm47z8x1uzGdsUlto8MBkMpnMabJAydzVKMAYHaPtd/e4snfIpZ0DJqaE3sX49bLAlwUueDZNQ1EadnfnlKWhrAomk4q6qil0wc7ODtevX2O5XOKci9MnKJy16DIaOhVQpJ04Wmkm1YSdvV1m8zl937PcLGn6Ftd0mCAc7h1QzqbM2h3q6YR6teC4XePbFa1YRAlBYvuIlEYbhCgo4hl9OzkjQggBLYpZWbOjJxQoSj8og+2p3Dk3ekHEAXgwICHE6kQQJIBPYXMi0YMSKy8Ko6JhNoTodfFK4boObI8xBi8xXA3ApdwUnaaL4uNCejtpA7Ixo/FVQRJGAbRCiVB6KK1lXk+4sLfHYrWm66M/JueiZDKZJ5IFSuauJXpQ4ubi6aTmwt4B9+2f59xkzkQKdAl1VWGUQivh9pFDa2FvZ8bFSxc4Pr7NZFpTGo3tO1q34caNazzyhUdQwJXLV5jNpojz3LxxE+/8+Nt/27VRnJgJhTEUJo4Bt01Ds1rH2HoleGeptOFgtsPObM7ufIeDcw3H3YbP37qGWx0RbINP+SEqTcNIECScaogoGcePTdBMjWF/OmVP1ZTaUAQQLzgXNyQHCShRiNLx+xAIwSEe+t7GEWABibPDSdukBNdUMaqqMhp3bQyVU1qnjcwpvE2r1KqKAqrrurj4ryjjlNHQ5klGX5/yUAYPyxlvSQgUaHYw9G3Hpb197iyXNG2bkn7jVFOWKJlMZiALlMzdy7h7p2BnMuXizh5XdveZiqYQwRiNEo9zltXyDl2zpCwMWiuOjm5RTUsOLx5w5+Ytrj3+KEe3btHYnoBw7+V7eO7zHuDkzh3WTYMEx2w6pSxLVsslbd8RtCaUhnazQmnou5bNcknXtohWUBjKomR3b46qSpq+YzabMbU9anEHFwKVMdxaH3OrWeCUw0uIbtnA6J5VIkmNxeu1Cpyf7XD/+YtcUBVl8qBorWi6jtVqxXK1wqsUdhYYY/eDj0JFSUCbgmoypbc9vbVpvDlKgDhFpDFK43XcNlRVNcpolus1iMT9QEqPywNj+yn+R0MRE3eVjuPfIhIXeiqoqzq2hlL7hrQd2qAolGA6y85swr0XLnByqooiKpAnejKZzEAWKJm7Fp3MsZO6Yn9nl/vOX+TCZIcqKFQI9H2HazYslycslgtCcFSTimpaUswqDu+5SN+1HC2PuHXnJsvVgv2DA+Z7u5R1xR9/7v/l9o1bBOeoipLz58/jXdx8bK1FKYWrKrTWyHqNc47Veo0Ae+f2uXjPFc5dOE/vLYtmQ92VXJ7vcGuzYb1cQ7XLpeec48Su+KPH/4Qv3nmchWvoQ/SfSNiaQxUQo0mEg91dXnL/83jZhfuZtgGcpypLDs+f53i54JFHv4jtO6wP6XPSBGljsFoIZ3w7s9kM3Wqs93CqohGCYHuHKQoKHeP0bW9RJk7nhFTlUSYd26mfiwzeFR0FZF1HQbJYLJAQx5f7NAmllIpZKklsIkKpwXU99xye5/GjI5abDda5bJbNZDJnyAIlc9cytAomdc25vV3uOTzPTllh+kDftqw2C9qTE5p2jUeYTCfsHx5w8fJl+hB/e//iI4/wmc98lma5YmJKgvesF0turm8QfMyj14Cvam7eukXXdSxXy3iy1grrPUrHtFcvgclkyt7eLufO7XPxwjkoNE1nqQtDXcyhKJmXJRfnO9ReEKOYGMX0gRdzuLfH5649xs3jYzZdjxfiHh4Eo0DrgnvvuZfX/YVX86rL9/EtOxfpr99msVjgreP27VscLY9pug0YGAsOw0ld4qwOEtsz3ntOFifJ2PvE5X/g05SNriuMNoS+ww2x9UohSVD4INFHolSKv08tI6VQhaFzlkriYwLE0emuwxgTKynRsAJBMFpR64Kuj1Wew71dbhwd0aUJrEwmkxnIAiVz1zLu36kq9nd2ODeZUkpAqUDTrDk5uoNr1ygD9bRmZ2+Pw/PnsK5ns17z+T/5HP/3M5+lXbVUukKJol1v6G2H84GiKFOwmdC6lkWziNuFnR0TVDf9BhDKsuDw4iGX77mMtZ4+9Dx+/TFQitlsxuG5c1hnuXH7Nr7tuDCbUjrHsm2oTc3F/QMOZ3tcmh2y2jQ8fusmdxbH9N4RFJy/eJGXvPqVXP3Lf5nnn7+I++J1mseuI4VgSo0LwqZd0/VrRFvEBCQEghecWLy3oDxlpbA+bk4OaYuyR/AGglYEB1u1onAu0EiDMYbeRtOx1jr6SiBVUgKaaKrVpCVByuCVRlzAO0cTOoyOLSMJ4EUwWjO4chWgJZp2VVBUWrFZN+zPd5jWE9abBq8CIVdRMplMIguUzF2L1pqyKJhNai7v7LMjmtoLrmlYLxf4vkcZzcV7LlFOKpz33Llzh8Vqxc0bN3j0scfo25ZJUcYTaxD6tqPrG+rpjP29PZq2YdNusH2fdurEkeDBWioxl4z53iGX77sHMYqu3aCspvM9k8mEsi5x3nLn6DaubzESCBomkwqvAqasoCzYA+pzBZtZxz3nL6Erw2x3xuHheV7wLS/hwouex2R/l8p5fDHhsc2ak/UCCgVeIY
VC1wUqGChiUUKUQjwEI2gN1aSiCxbnBPHx+FEabRRKdBwxTp2eEF25OO9STST6ScqhHUMcftZGoVVMmdVKp+sMQSnc8HlJzHAZjLNKqTTloyjKgqoq0+6hgARBi0Z6y965XeazKcfLZZwY8k/+byGTyTz7yAIlc9dijKauSnYmU/ZNxbwH3ffcvnlEs96gAuxePMfeuX0WyyWr5ZLHr12jbVsee/wxQhCmdRwv1kHoXRcXAabf0Pu+p22j36S3fTSYpi27Q7x7II7WWuWxONrOUs0nzHZ2aJuGcl5hpgVN3xK0xJFiYuvEFJqJqqmnMzwS99KECW3Z0auAqUvmezucOzzP4XzGzqSiPtjDr1eEnZIrL34AZzcc2zbu9AkaJQVVMaMiYFcbnPU4LVDGdhTioFD0tkeURkTHSoqPo80xUTYlmKhhN6BE8y4xMM5LSBNFbCP5IW0gTtWVEPAuhtEppRAJOB8wKV1WofAhoLXGeU+lq/g8Nkbmax/i/ucQ2N/Z4cbREdY6lPK5gpLJZIAsUDJ3MUYb6qpmXtfsi6HuA4vlHTbNBu89hdYcXLxAj6TIdGg2LdeuP4qzPVVVU1UFJkDftrTNhhBiG6NZr2k3G3Rh0Bqsd4SQIuIlVU4UUXAoKCaGdd9g6oqgAyftMcZoqOHW4iaz2ZyN27Bu1hRlgTIFBVBPJ+iiiG0XHdse91y6SD2dYrVQ7kyY7c8oCigLAZZ4VvSqYbJTcu55lznuFsiJoyhr3HGDloK52WFjW3zokMJHr4gEXN/gFejK4D3gYsUkhNjuGWyyQ2T9sJRQi4ptIWKgnCYKlGFXIAJGgShPSUy5tc6hCxONtUHhfIeoGOGvlMKJi7kpwbNpoDAGHwJGa4JAIeC6jt3ZlElZ0RU9yj4d/9IymczdSBYombsWpRSTquJwvsteUdMvVqhNGl1NJs7lyQLrOkIIXL9xnccff5zgPaWJbQUN2K6jaxq6rgGgLAowGqUM0+kUGyzOx5OpUgrnfTR4GgNFQEpwOnDr5Iidgz1mk5rVcklV1Vz/oxtMpzN2d3dYHC/QyjCv59Slpi40poiLCCXtu5nv7nP//fdBZXAI1bRGzw1Lu6a3x8h6Sbfe0LQLjpYbqlpz/nlXeOQPNzjlsEZYLtexv2NAlxodDC44xCi8j/YSXRcor7A4lBPGjHuGqogaM0y0MihlsDbmn4Tg0UpQmO1jiIFvsVoSo++1BLz3lKairAq86wnBU5QV3jmcsyjFOIY8hL4ZASOKUin6rmNnNqWuK4q2iMeUyWQyZIGSuYtRCsoUWHZQ1BgvKOJ+mGpSIxI4unGDxXrJarXi1q1b9F1LVRkm05q9vV36tmOxXFDqaLb1zqF0PDEbramqisWdBW3bUtXV+Nr1pEZr6EMfd9o4C6FkuV7Rtg1BPKtmzWq5oiwrrt/UFKZgPpuhizimO6mnKISubzFFRRDobcfR8RHV3pyiLAldwJQG6zZ0fQGuRIxHjONOcwQOKl2w0ZY7m2M2tqcLPX3bYr1FNJi6RBMI3uJ8Kv1wavImnfNVEDR63LAc4+oZ7zNs1BnTbNNSwHRlzEYJAe/jfp/JZIL1jt5atDZjayakhYgiHhHFbFYxm85ZbzYA6KjVMALKOmqt2ZlOWW426fWyESWTyWSBkrmL0UpTG8OBKTlfTjAusLM3p/OerukoCoM2BevHT7h54wZd16PU4CMpsLZluVrQ9Bt8UTKra9DEJXkhEAjcuHWd49UJTizdpgNgOpnS2gaUoMsoNvq+pZpUtKuOtXcoJaw3K8q6QnuFdQGrFWiLrgJB9+iJozY1TtZ42xKC4uSkZ7E+5tJzrjDZnREM+HXPyq2Y1eeZlLtMJxOcsbRhw3q1AidY3dKrBqs3qDLgmx4nluAFowuKqqRw5ZgY66zgXfSbCCpuTw6SYu2T6FCkHUGeEGLKrKjkRUEhykfRIoJOY8QxI8XjxaO0YNCoLtB5jycl5PYu5Z/Ei+09zglaFeNr6iDUSmE9nCw37EymFNqQ6yeZTGYgC5TMXUtZGHaqmovTHfbKCr9coicTfO/o+5bVumO5XHLn6IgQHODxwaGCprMt7Z2GpmkQE4WGVwGHwwVH72xqFUHv7DiOC9C5HoejnpRMZ3N6Z3HWslossMEhQFkWzGdTTFlgnae3PUpDPS/ACE45umBpmw7RAfGe1bpDKcNkPmO1WdL4hmWzorErXOkp1JKdcMDe7i6+7fBYnHT0XYenx0mPxyE6GnHjwj+Pdx5BY+oKlML2Fohbh32Kt1ch7gAa8+RV3JUTEHxaHBgLIFvjq0pfF1rHGNlURYn3EaztsDZWU0RiVH4gjhKTngvAec9ytaIsCkxhYgsNhUZToQibhvl0gkk7fjKZTAayQMncxdRlyeFsxjlT068b1osF0vfcPD7Gdj3Hx3dYLBd4Z9ndmdP2HV4CvotTOcPOmtl8znw2R6yjaRs629M5Oy62G+LZbfA42+O8xUusILCBoixwQcAJRV3GCoCJkyur9Qrn4qitKQ1KaXwI9J1j0/d455iaGuWgbVuULvAKODrC1AWbvqH1G3zh8KZlrRuObt4kWIv0jm7T0qw32KbHeosdjKcq7Skqi+g78dtdO0OQGipG4AdAiRDYLuQLDNM6sZIULSpn6xdKKSbTCbUuaDcN3sfKk3hP13d4F6d5SDuFtAhGG5SE2CJCpb1DcXmhTiPHSAyJK0qDFoXxgUprpnU9jjdnMplMFiiZu5a6LDmsZ+zrksXxMevlEtu2LBYLTo5P6G2H9Q6loHOW3vWpRuDx1uO8o6prLly8CCFwexmTYjvb41IYiCCUk5rp7pzNZoP1ljjgoglaxQh2CSgvSFXiVKDvoy8FCbGwkM7rFTVt1+ElVmWGcdyVrMEFCAalHY3r2PgOCoNT0W/S09E7WIYVVVWgUyOmazq887i+p3WWXhwFGqUNygRCYPR/BGdjxQSSp4TRjxJUbO8Mf7Q2sa2DIgzBbadD7SWKi77rQUdPyHCrINi+xxcarQwmGVslCNqYVH0BlYzBkg5GkmBRWkchSRolp6AX2JlOY7hbJpPJkAVK5i7GGMP+bEbRO06OT1A+sGhb1k1D53ratsEHT1Fo3MbinKOuK/o+tnq0MZR1zWQ25fjoDov1MtYWtEaCj1HvWoOC1WZNb3vKqsSkcdiiUECIKa0ScMHh25j3EcPIQHTceTNmg3jBNpu4oRdidHwIqAB1OUOLYL3DtmuKaU2PI2DBeLxXmNIh8wlax+e33qURaodooNTY1sfgNaewwWG0Aa3xCM7HKRwbQozR1ybF4iuEuLV4yHdRxsSQtxCAgBINSsZJGgkSTcIoDHr0oCilcSlNRSuVwtsguNjq0SqJnTTxo1I5pyiK6IEp4vfWOlSpKbVBO898Et93JpPJQBYombuY0hgqpWmXK4y1VOiY9Bo83jucczjv8SGaOk2h2T93jsUJrNcrCm0wpmC5XHHnzjE+nbydc/TOxQh3BevNmj54tNHs7u9RFiVd1xJCdGjoFNYWQsCGKDxKY1A65oaICIUxSAg0TUMg4
NMotPce0s4fJQVFAdZZFps1NCaOAxcBXQquFWgkjvkaNY70Bh9QIpgiCRHfY+mxwYNWBJXMqcOm4tSqEWJrRymNqG3CKymIbjqdYZ2jaTfJW6LOdHlUEh+EaKRVgCkKVAjpNWPmibCNsw/eM9RaVGqhRb0Tqy5KK4zSFEZHc64XdKnBOiaTCqNyBSWTyUSyQMnctRQo1LrFSTRqOu8oKoPpwbmeznagFdVkAigkeHrvUGWJoLFO8MsNq+WGvutSSyaKDBeiidSEuAQwnmA1zabBmg6lY0ibVrEKIUoRCATv064aAzqmqzqReCIXaJoWJwHnHWVZUhiTNgdboEU7hw2OzvWIKCblBBUgOIcYYA3OC2KSWRWNUjpWMFCgBDGaHk+vHEqbGDCXPjOFjpUQAloUhChyRKWY+TPpsXERYmEMwYfRt5L6M1HQ6GFyJ8QcE9FRfgiI9Xgl6ELiZuNtNG3UOd6m2oui0AZnO7TRKFVgKFEiiA8EHKICmjJ7UDKZzEgWKJm7llJpKh8Q72idQ6MotaZtN2w2K5y3TGc77OzvUtU1t2/e4ubR7di6AUJvca6J6aUm7plRWkNh8K7n9CJgrWKOidaaIB4V4slclyWmLFEGjFI4n8ZwtcZHeymS/Boi4KzFITFlVRvqso7CJgS6viUALnic93GfjXhUUGnZn4CO24N1rSmqIlaMvKNQBqM13rto/lVxjkcpjQSHnApii/t2hmkaNU7rxHVEaQlgCPhmkwTB1n+i1Km/T+3TEa0IAZwETHq/0WAbp32KMobiqW2ayqBV0LqgnpQ0bQzKc9YxBqGIQAiI8mmjcxYomUwmkgVK5q5lVpZMtMG3lmAt1lpMEFbLJT6kBXcadvf28UO7Q9LJMgg2xbMHGw2rZVVR1gXSbtDGxpaPxArCbDbHB0dVV3Rdg3OOoq6oygpdGMq6RClF23ZRXLgQU1p1bI2EdDb2PuDSQjxnHS0twXm8D1gbJY0XiZM8XuOcw6g4eovWiIoG30pPKEodI+ydI1p/Fd66GIKWxEcIQvBC8D6NEpMmZ4QgscUzijAdNw2nWklqYREXKcK2vTN6TQYhklYJJvHgQzgzAeVc2oJszChOhjaShIBzjvV6hShFYTSS3pMxBqMNWgniPZpoms1kMhnIAiVzF1MXBbXWNK6h28T9Oz541r7DKkCD9R5PoPeO3cMD/J1jCmNoV5vY/qkmSBrLNVVJNZ2w7lq0KXEhZqGIS+0LpXEuIEEBhrqaMJ3OcN7Rdz3WOax1iEDXW4IOcdNv2mcTfBzWUcqgtcY7aL1FXBrPTVUHl6Z/fBA8UEoZ9+d44tZiH/AbixUdWyxpl46EQPAhZZIA6Og9CQHvAspHQcI4aKy23hJStolOO4aGe4icSoCJqFNGlEGkDF8D0VcSr4jfEseGtRqlDj54jIrbk51zGMCURXxIahlp0RijES+p3ZMTZDOZzJYsUDJ3LaXWFEohCqxEcdI7h0NwKhpT53u7LDdrVKqQmKrEe8+q2XD/vc+hd5Z116BQtK5HWoV1nqBUOs/HLb+bpou5HF6hlKB0ARj63tE2Lb2POSQ+mUCDB1GBclKgiCdo71NKqo5+lD5lf6hxNXBcvichigOPxysQpZCCOO0jCl1opAerXKyiAAyVEpfyThx4L9E7EsC5gHKBUhu2IfZniRH26ZthFFjYVj3YtrxIJl+lNUYNLSxJEz/Dc3BqK7KkZYuMo8bDawQRtGyfP6QKk/ce71JLTdJr5k3GmUwmkQVK5q7FO4fteoyPaam9jVUMUxRxvBVFXdccHx/jvWc2n9P3Pe1mQ1mWtF3Lar3Gh4DWit7auAyPWITwACGglcIUBbPZFGt7ROJv8s46nLU0TYNXZ3+7FxFEpb0zAgqHCzFOxDuX8kJiy0f57Yk3pK3DgZhvZsTExFcbGMoPWqIwUF6lDLRozh2qJ3E78VacKKUJ3uNtajklk+rgixlEQVwBoJLpNrpFVIqylye8twElKdtEq1FUnBUp23qLJLNwVZaxfeb8GCg7eFniFFa8f/Cerg+IjlNS4qOnJpPJZCALlMxdjOstXdui2w7nHd55go95GX3X03cd165dQ4h+j/lsTl2ULNseozUniwV9H82wSpmYrJoESfKjElKux858zv7+HovlCZtmnVo2HhFPEI/IICBUygoBlMb7KBBcGMyyqSKSWj4MbRdJsfLJoxJScqsRRaEKJPjY2iFOBhWFIDbE5X/Eykfwilj3URgx6GCSRTUaWK2NgiDmkOjRPxK8jGJFKY3SGq112pcTsC7G1Y9jyMQWkEoZJkpSpUSDlvi8gwBSp6aTtdKUVZXGk00Mw4sPi8fiJU5HSQrJ8xpBo8q4uHEQi5lMJgNZoGTuYsR7+rZD9x1IDG4L3uP6GFN/cO4cq+US5zyIsFmt6LounnS9p7c2GTnjSc+oOC4cs0CiaNBKUVcVVV1Hs6fSY0XA9j1JMsRoM1HoIU41hZENe2vi/AophXbwpAjBuZRFQtyeI8mLAuBBvKTjEqz3ycAq9G1HaGPwWVVXKfk1ptcqNCootBTx2CQMsbH4IClCbZtpEg2tKgWxqRSclvwlREETnigM0ihyCCGKkSQ2IGafGK2jMXl4LgGjNWVR0Pc9zsZAu1EIidBsWsqqjNuMIVZ/tI5lJwHxo7kmk8lkeEr11F/8xV/kla98JXt7e+zt7XH16lU+9KEPjbe3bctDDz3E+fPn2dnZ4S1veQvXr18/8xyPPPIIb37zm5nNZly6dIkf/dEfjWmbmcwTkCA4Z2OSqgQmkwkoRdf3TOqa3Z2dcXR4MpnEIDXrxjj3qBYU47lXKYwxsWVh4omzqir29vYwRuOcjeZNHU/kzrsxmv20dTSdT+NrJNPn6dOq1vGYQoiBbTEgLm0WHuwoQcbj9c5t2ycutrO8jZe+6+JYrsRpGyUqVmFSaqukLcRa6/Te0rBvkDH5FVKFI71nnyZrrLXRHAyUZUzQ5ZQpdpjDHgy6gyGXFE7H2OmJr2GdY7Va0fd2PD4YWk1RSA2fqTC0krZhbkOlKZPJZOApVlDuu+8+fvqnf5oXv/jFiAi/8iu/wt/8m3+T//2//zcvf/nL+ZEf+RH+y3/5L/zar/0a+/v7/NAP/RDf8z3fw//6X/8LiEbCN7/5zVy5coWPfexjPP744/zAD/wAZVnyz/7ZP/uGvMHMM5c4AeIpJMSxYWvjCVKE5XJJ07ajOXOSxErTNIhS6KLAOYvR24h2YwzVNAqZ3jtUYaKx1hhsbwlpM7DSGpyLuSilwiOYEMPOhqGYQQJFL8spm6lSGG0Q72MWyeABcRIrKGmaxyeRE8TTNB3axFHlkF4j6FSoCeCtQ5cTFBpRGkmGYcZlfFuBEnyM8A9eQGlENEFS1UOZuKvHe0Sf8pmklo8AKvas0g1s39eQh0KUakFSOwlFWRbRR+N9CoKL7S6tDcYYnLUxWr+MgXDBxJ8FMLahSFWZqqq+wf+qMpnMMwUl8rX9ynJ4eMjP/MzP8L3f+71cvHiRD3zgA3zv934vAJ/5zGd42ctexsMPP8zr
X/96PvShD/Fd3/VdPPbYY1y+fBmA97///fzYj/0YN2/e/Ir/47RYLNjf3/9aDjvzDOCvPu8FfHu1j9q0aCGN07q0wE9i9YToK9nb30drzWq5xJQFs50dbt++jU8ZHUZr5js7HF64QNt33LxxE9f31EVJkaZugngkeFDxpFkUinoWtxx3ros5LMZQTyds+m7rsdBDZSIu39O6INgQT8whGlljxSQGtCmt8TrmiSigrDT1tEIQrETTaDUtMUWBdVE4zWc7lGWNRhM4VUFJ1ZS+tzjrCa3F20DwcWNx8IrgBCMKrQoCcexXdDT5RitNFHGjWBjGisdL/KP1sFtHxmoSSjObzxCgbZtYVBFBgsaYgqIssL1FI1Bo0HElQVkUYwCcMwo7KbG7NZ8+vsNnPv/In/u/tUwm8+fLyckJe3t7X/Y+X7Vl3nvPr/7qr7Jer7l69Sqf/OQnsdbyhje8YbzPS1/6Uh544AEefvhhAB5++GFe8YpXjOIE4E1vehOLxYJPf/rTf+prdV3HYrE4c8k8G1CoFNwVUlvCOTdOkQzZIrEC0tNsNuk2NZ5wB4bqhvOOvu9RRjOZTlBKxXZHeu7YThKqKvo+IAqQMLRgRHCnqiNDS8SHkK73OBvbJ6dfPfo5tvtyotclBq0553HWJhNwyjoRwegCowu8DWzWG/q2SwZVjdYm5o7otD04xBnmYRTYD5WUoTUjgg9+DF2Lo74h+VqGFsyTtFiSFwWgLMrttuFTd+v6Htv38aFDfGz6mXnnT/l00vOFbSVpaHVFH42cyWDJZDLPbp6yQPnUpz7Fzs4OdV3z9//+3+fXf/3XefDBB7l27RpVVXFwcHDm/pcvX+batWsAXLt27Yw4GW4fbvvTeO9738v+/v54uf/++5/qYWeegfTi6bzfjp4OiaZKjdMe2hj29/c5ODiIvg8RbN9x5+hONJqm2HutFM5Z1sslfddR1xV7B/tU0xqf/BESFF4EpQx1XVNWJW3fsdpssN4hSmG9Z71p6K0liMR2jhB9I94jw7bg4Elxr6T5HQQHeJQKlAaMEkiZIOIVzgnOSQxrswoCGG0wytC3PZv1hq7r6DqL7T0SFEqZOKEkJqbESsx1kfR5Df4OGf+EMfzkiR6RJ2Pw26CgKAtM2tw8+nAkYJ3Fp2WCcSni9j8sMrS5RpkzTPT4OPI9jF0Pgi+TyWQST1mgvOQlL+H3f//3+fjHP8473vEO3va2t/EHf/AH34hjG3n3u9/NycnJePnCF77wDX29zN1B5z2NS/4FFVsRY4LpqWkcbWLM+pC14Z2nbZohACSNyQIS99pohKIwqEJjg8MT4nRPMsM67+n6DtFRsPTOxoXEKFLY6ygA4pLAaFwlpDTUEFtFMbgsEMQTJIoTxCPiUAQ0AYJLxljBu1g9CQG8FYILcfNvUUAA21varqNrO/rW0nceZyUl3w4G3DRinL7XCEoFUIGg4g4fUbG9M5h7Q6oAhaGqMYSynbp4CWNLK+0gHC+j9ya13AgytoXiTSqm+bKtxmwrOmE0zI7G5kwmk+GrGDOuqooXvehFALzmNa/hE5/4BP/yX/5Lvu/7vo++7zk+Pj5TRbl+/TpXrlwB4MqVK/zu7/7umecbpnyG+zwZdV1T1/VTPdTMMxxRUQwoHTf5ShBMEbcDp1oAgrDaNDRdR1FVmMKwXCzHtoVKqaaSwsJCiGJkVtfs7x+wOlkiAmVVUuiC5dqO7RHl48QLqPQ44lSLGqZ6DIiOokB0mptNxz6kuQ7VmSEoLY3uOtvHlk5KbPU+QBErPZLaPtZadFExOEGcc/jNhqqcoFWBD4LxPmWhDB/aKfNraq0E5+OHqdR4/j/TSBkqKV/mZxFE2Gw22xbP+CRqvF2rwUAb0nBT8q2QDMZf8rpD9SZVWCTLk0wms+Vrjm0MIdB1Ha95zWsoy5KPfOQj422f/exneeSRR7h69SoAV69e5VOf+hQ3btwY7/PhD3+Yvb09Hnzwwa/1UDLfZATAEXfo6LJAdJzOUSa2cnxqEzRtw2q9pu1aXEghaMNI8alRW+cC1gWUNrFK4UNMpdWa6XTKdDpBpfaMtY7VakPXWTwxon64DJWKANi0CDAVa3BeYmBa2E6oiCgkaCRoEE0ICtsHnEubkVVMUSl0gVGGISQuihQfX5e4uyc4Gcd9xccdPz6JFNCjJ2a8wDhtJKfVSfqMviJUSquV5LM59bykKooLns72OAJBQ1CpcoQgSrZbigcD7ilPyvB3SK2eTCaTgadYQXn3u9/Nd37nd/LAAw+wXC75wAc+wG/91m/xm7/5m+zv7/P2t7+dd73rXRweHrK3t8c73/lOrl69yutf/3oA3vjGN/Lggw/y/d///bzvfe/j2rVr/PiP/zgPPfRQrpBkvoTGOdrSYyVQjlHtp9oOxG3FLsTKhJOA9i7uy/GBsihi9kc6OYY0+rq7t08QYb3eYHuHUipG5FuXvCMplVULoqMQGdpKsQSgEQQfQFSsgMiwKThVaRQxTVURtwrHFpBCsTX9imhERcUjPqCCStWWZGp1AWcD6DgZpFRABRAXiJlrBjQE5wgeiqKMJ3gbkDQGLCm1ViS+/sCYQZLe25NJFREZg92CGqos8XHDdM94X+KW5Hi3NOIcYva/UorpbE7btjjvzjxGqeQPQrDO0fX91/4PJ5PJfFPwlATKjRs3+IEf+AEef/xx9vf3eeUrX8lv/uZv8tf+2l8D4Gd/9mfRWvOWt7yFrut405vexC/8wi+MjzfG8MEPfpB3vOMdXL16lfl8ztve9jZ+6qd+6uv7rjLfFHgRvNa01iEh0Pd2jFj3Els12sTNwW3bopTCptC/IoWODcmww2/9zgcKU1BNJty8fYu+j6PA69Ua71xsT2iVtgJHtq2JZMxV0YsSEDjj24ivE1JIWlwaKEiIQWwIcfNxei516hQfJI5Qh1NmVu89OqogtCkoy5iJMkwNkapI1lqCj2Fripjo6ocpplMVChnEw5n/n97rmH0yxs+Of4vafh7jEsChMjWk8o4vE5/LFEU87iQO266LgYxjTsqpSsmwo0dILbVMJpP5OuSgPB3kHJRnBy+65x5evXfI5S4wdYJdramLAoWi7TuU1kxnUyaTCcfHx+NCPQUYgXPnzgHQNk0SKFHQnDs8j9aa23fu4Poe8SFVIRyi4r4YpeISGa8FlyoQMpyQSUbR4UQ7XJcqJSEEVIjbliEuHfRpmkjpwcwaT8RDkL4ohSni/Ye2SFGVlHUZOzcpmTY4jzEmjRDH/+k679Fp15BCIR76routpxD3+MTpom0F5/R6wCHETmQrxQbD7BN342itUyJtFH7j53BqYzNpfYAEttWZ8e/46lrHTBRVGEJl2BBo9yb8n9u3efTWra/Tv6BMJnO38pXkoORdPJm7Fq9gbRRHvue8pIqIKSiLks5aYqR79EcMfotULonVDO8pyjKejNO4bak1hTJs1hukdwQbl/SFEFsxomJGCaTf9INCCLjgEaXHioDolGEiYZvEKjL6VBDSbhnG6olXjHtwOLW1N7CdZAE1VmyCC3gdKEozZr9
obRBU8r1IMqJqTGnGSRxtDEVVINbTd240FQ9Nma3miAejjaGaVKkSc9bKqtPotkhIuSsxgA2lUsstDVKr6BHWaVvysLgwtoOiMBxC4aKRVlGWFYFk8h3zWZ5xvy9lMplvEFmgZO5aXAiousJvejrrUM7Ro8d2hdKxpbM4OSGkiHaFGq0iq/Wauq7jb/smCpad3V0U0DUt3sYNyZLyOOBLsziGqZvgPZ4Qg9GMxmgT2zthyFCRrfGT2LKJBpGxt/JlJ1SGSoQizUQnn41zDl1orHdxbqgwSeTEtogxKo4hpwpFLGAoirJM+TGarrXRW8OZQZ4RHzybtDagUOZPOcLU1kmpuUExipPtPRj3GPmUmKtgXJCotU4LoRWkz1SUjNNNRVH+qXksmUzm2cfXPMWTyXyjsM7RBk9fGrq4LhcrcY+OLkysBCQfRjQ3bD0Sp38Tn0xqDg8P4/4XEdarNb21Mbpe67hhV6skKp54glTRzJqC0HyqzpRFhdYxnyRmn0VxZIzBpCqBpMpFXDioxpM3MJ7oh9cY3CiDb0ShUiaKxNd1AWc93kZPSlFUgI5hbaIRF8ALKr2FaNaVrVdkeP5BAI0vna4fFgKmasmpm8a7D1Uen8TF4OtJB759r0NVR8cMFzXeZWtwFhhTe4ftzmaodmUymQy5gpK5i+l6y0nbUJkC7R2liR6KsUvCEDSWvouKZTR0ylD5SL4QrTWbzYa+j9HyQYS6KJjMapbLBc7DIFCUUpRViRPBehu9G4OJIoAf20OSujXxcbP5DAmBk34RzbJaj1WOoBTW9ggp70QpVAipaqJGA62EAIFxgsgmo6xIFCCCQpcFJgT6psUn341OwWtehVGUeBfG9pAanbJqK1A4K8m8+DSBJKNfRZ0SMR7QIqC36mWo2qTYl1jN4qwZdhAt6SGxQhQCWuIaATGA0bmCkslkRrJAydy1+ODZdD1+f8pGw74uYo5I2lWjia0CiBWJIm3zjUbX7fI77xzr1SrG4Lcd3qWTo1IxMVbFBNdYbYivLcR9Uy4MngzNMPXifaBtu1RJiCfZ4WTcdd3oJRlaTdE861Nb5FRV43TrR8XpG1QUJeJ8bGOh6Pu4xbkoCowpGFJiq7Ii2BToRgqlI5p7TVEw7Foe3tFpYfclhQqRU6JlO378REKIfhmjizNeFqWTgAtJSBEncrz3cZliqhiNYgbGiosg6KKgquszbbJMJvPsJguUzF1LEKH3jg6oZzXNxjJTmlLHSooefBjpt/PZfE6zWeP6gErtHIiR6sMG5DjZsj0Jeu/pu9QOER1bJDpWYqx12BAIYsbCA2zFyzDBs51igdDaMQtFa0NZloh3WNvjkeiH0bG6Ead+VQxyA2KsSTSZeh0X6pFO8sPzCUkkWBnFjwRJHpQkAqKhZWyxWGe3OoWtSDojQMYslCcXCKf1jBLBKIVKRmGIO4OENIEUkgfndGBcauPEtzyIt1R1UQpTVzBOEmUymUwWKJm7GO8866bhzmbN5dmUddszF8FohSHtzunTWK9WeOfiRE1hTvkeAp5APZ0yncw4un0b8Y5hhR8EggPQUaAIaZdemvyJimCscsRtweDlbBViCC6TQNoRGOPwi6LCK4Xv03bjFLvvQxQ13o82FvCx1SEoXIiZLSH2dPDpe+tDEhZxU7AMSbUkYZL8LBLz3VA6vSGVRpQ4e8yDSBqqOqcrGGkYKb637QRxzHiRKKZIu3QYK0MS9/2EkMRYEk3JyKKUQlL8vdKxzeUUMKlY9R29c2QymQxkgZK5i3E+TpfcWSzYO1/F7cKtZ4pBq/jbutY6GS4dTWjGiZa4ODD+dq5V9ICUVYXWGi9PCAsbx1+e/DjGRszYktlOzJx5WKpCqDRybEwR80vcEL5GEjsaVBrRDSGlyW7bJ4KksDgdDcDpWJ21uLKMrayhKiFxCkZJrOSMAXPex2mbEDBap0mlJ3tz23fwZAJljKZ/wsOC99vKiIrva/jcRVK7bPAFJb8NKVhPGT0udhQFNjiKwnBnuaS39s/8d5HJZJ4dZIGSuWsJIdBby7ppOW42TKsJy6anBqZKo0OgSuLDdx4fPHVRM6smLBaLcapHKUXbtnSdHYXLmZ7F6UrIn+aBSAUIpRSFiVkkzkZvyJisGp8gtS6SD8M5nPWpopLyWRgMradmVtLJnRC9IMpET41zbvRtxFyWgCpLVBIgp3cNkVpOw5SN0orANoRNQhhf87SoigKIcXHheBNbATZakyUl6CqBNLqMgAypucMU0hmxI8P/jV4arbfZJ15BNZnQrBZfMuadyWSeveQx48xdS0iekd5alm1LVxV005KlCjRa6DX03tO13TB7g3eOaVVjlB7dp0qZbaZIChtTGpIddXw9fTqIDdmOKqcqxTCGOwSenZmESSFtpKAyCXE5YdfZuIcnmi2I+SUyjg8P00dlUaCViW2aNLGzDWfbjiRba/HJTwNxfYRJibVy+niTgVckYEz0wqhhszDbvUZnRoCHUeNTI8dn5cpWwJ1dGDhMU6WQ/rGKIhij0EafOS5vHRpSJSgQCkOvoLU2jxlnMpmRXEHJ3L2kE5rzjqbr6ELgcH+ftrtFLSGKEBVP4IO9wjnH0dFRPLmf/nU8VQm0igmmYsM4Dhu9mkm4qDRdkjwiwjDgosbWTfBpEd8TeibDRMrw9TDBEosK28kVf+bET/RqjJM9AWX06NfYJtSmE7+1KK0xxlAYQ1kUMZPkCcfiU4VFKYMbDL3jEaS/B7Ps1o2SvDenjk1rCq0xafnfmcRXEYqyjMv+kndEsQ2OG8STEMeyT/8sSD8LUFSzGcumYd20oyjLZDKZXEHJ3LUMJ0LvA23Xc2e5oC8MzKY0InitKKoKU8SFgcMJz4WAqDOn4jRpE0Plp/NZTJRNo7GDV2IYMx6mcGSwnahYBSiMSb4OvxU3cFaUhIBLSwfDEGqWws9iSyae7E8HtYkI1tr0OKEwRfTMlBVFUYztmbEC4RzB+fGYjU7HlUSRTwJqaAEF7/EubCtKbKeIhhj8MyPIQ6YJ6fnCNkfGpPaMTvfx3kcBRPyclI4ptsbEeH5rXXztU2FxEItJFqFXUO3MWXcdzrvc4slkMiO5gpK5u0nVA+ssJ+s115cL7tmds25apgrE6Fjp8MlfoZN5Nk3ZKBXQhYmTMN7HxNdCoShQpkCJj20ZQFSgrEtc7wk2tn+ixyS2aOLJU0bxogYfiZco9VPryIcQ2ygqjL6UuBAwjKmymCgOnHMoTBQKAkFJHInuQxQeOo4M67EKo1AhLuyToAgelDKAQaUE2BBSNUOlSaMAKqQxZE4Lt+1nLAhamTGfZRBmQ1JvwFOXVTIlK4JK7pSh4jRMPaEwKiX8B7UdHJIUCqNSTL4CqwKthkLDstmkVlgmk8lEskDJ3NUMkyTOe5q242i5ZFZV1LOa5aZnbgwzk8Z7JeaMiGwnYUKa5MEoxEUj6nK1Sv6PATW2TQa/hnUuTb6EWJEJIFrG1Tpbu8d2msaMbRlAmSgeYNw+PPg8hjFgo3XMBOF0Vkh8r0YpnHcYSZWhokh7f0hVHKHv+7
Eqo5Uac0TUqAWiOtAowukEuie2dJ74mac2zpjIS0yVHaozYTDaPiFLZWgZjdWnUx7ZJ/5MgxJ6EdRsQpumtax3eVlgJpMZyQIlc9cTT+xC7xyrpuHOZsPl3V26ICx7j0FTqCgEiqok9C2QPA4SWyIQp2880WgaXGy96DTSIxJwXnAbix+GfU+ns8E4rSPeDzaKeHwIEjziAOURBFMU44SP1jqdtAU9BMil9k98jRTelp5XaUVV1fE4gx/Fgtaa4IdWVdxsHBNq03MPyiRVe7z3GBOrMDoo/Fdw8pdTXw3j1JyqHI1Vo/R5PLEnM0wbwTbaHnTac5TyXghoVeC1UO/vctQ2tF2Pc6e9MplM5tlOFiiZZwTR3+HjRM9mQ2UMF2ZTVnZFJZ45UJgYl269xcfFOrFtIXFx39AqGbYTj94TDZAW8xF/89+eJ7cnTKUUO/Mdjhfr5PPYjs8qiBt6EYIStJjRbNt13fgcWus4OzT4StJjTRIxCsFJSNuFQzKTxnuJIqbRksZ+k3dlqJRsjbbDxFAMi9NaEyRWdqIJVXjifM6QI7t942czUU4/vxqOP13GUech8wQwOqXFKo1CY5MgjPH28b2E0iBVxfL4COvclxqbM5nMs5osUDLPDCSe7K1zbNoWrWBe10zmMzZ3ltTaUGkFWqU2i9puGNYa6y1yaokdanBkqG1MarotDQWnTsjWyCo+0DQNMFQKYHjgacMrKMqqoggFm6bZhqcplSTDWYEyPH4IN3ti7PzpUWBOiYGhFUOaChoFyhO1BoNhNraxnPfJIjvcFiPxtdqON8dMFE7rsyf9mQzv67R44dRxFYXGmBKALoWwKa3og2O6f56l7Vk3Dc655N3JZDKZSJ7iyTwjiOO+QxWlp+l67qxWhNkEN6npFPQh0PX92FYZtxunkddhZHc7gqvSWHFKOh1fLLZT6nqCSRkeQ6S8te5UIBuDTzSe/PWQRxLouw7rXKrQ6O2U0SgutoJimP4ZqxJBxpyR+N63czaDENGnLsPxDemyDJM4jEklqDSOvZ12Ov3hylZUDO9NbZNgt6PI279Oi5Ltw2JSrEBqqzF6VGL2jEIZjdcKNZtQ7O1we7mg6bro8zlt7slkMs96cgUl84xBiCfy3jqg4856xaSquPf8HoujE6RziBMKiZHvp9X3IASCpM27QvKapJO5gKAJygNxZLdAAyZO8QTQ6K2QGaoZcnoqRsXnIND7kGL1o+AYMk2G5/bxAXEMeAxx08m3wvg6SqV9QCi0iobXYb3O0PohVX0GkRHwiB4mj6K/JqK3wu0JHpreO/Dxec+m4g4fIOMOHYGxrRNfT0ARJ4tUWhiIoMchooBXAWUADbaA6twux67jeL2mS16b06Isk8lkskDJPGMQAQmCS6t51w3c1MdMq4r7Lh7SPH4TZR1zpTACMowPn3K0SjJ6qmg8SVUHxp6OSm2Qvu9pmnaMtzfGYMoC0RoXfBzpPXOSTyf3IWNkCFcbJ2KI+2rSawYJ6diIYWjG4LVC2+hjOW0WHXVIfGaCgJaYRHuqoxPFQwgEwrY6A9u9RCh0ykw5PWg8VKcgbVOWKJCUSNw0nCacfPBIkFHkKK2jaExx/GUZWzlDhcoUBWgVU2uRaI0xGj2fwHzKzdu32HQdLgXNZYNsJpM5TRYomWcUQQRCwAH0sGDDNXPEwXNmHFw4pLl+k0prCjQqxI3IajBfqigGYptEjxM5DI6MpATqyQSUovUdPnjQ29FeH9wYE/8lp9MkZobFfVGQJN/JKbGwPRGnikQQrPU4GfJVdFogKGMuyWjFPTVRg9ZjleL08kOtU4tLa1QYIuwFdMD5NA7MWXH1ZT/z0ZOixp+BJk4WqRTONj7fKV+KhDCON4uKO3esgvJgjxuLBbdPTmi6NgoUyQIlk8mcJQuUzDOOwchpAWzP8XrN52/fpLh0mfJwn9XxIqa+kk6MEmLQ2anf+uMQzFDlYCxDhBBYNxsAdGHQIXlAlI4+kSDj4yH5LtCjiHjiKVYrdea6rStkQKVjiPkioDDKoLUCcVsj7aljFKJ5TMawkW0lRKdWUlGYKBLUEDg3vHIAwiCNTomUrcgI22nl00PWZyZ2TpuC1bBR2rvxOEDwElBpzWBQ4IzGz2q64Hn86IjVpsE6P8bmZzKZzGmyQMk84xgqBjoEvIe277m9WDKtal5w/gLKe9p1G9sVCsQHysFrklTLEDYWFNEnItuhHZ9OtBqdWj6gixje5p1HvJz1aciwWFBh9FaQDD7V4SQe/JfmfGxrKWf8p9FsGgLO2zS2a9BKR2EhEtspKsbYnxFLWlOWMXBu3I+jNSL+jMg5ewRDY+ps6+eUA2Ws5AyvMQqvU+9n3O2jTj0mPZtXwKSmPNjj8cUxy82a3tqxopTlSSaTeSJ5iifzjCXuutmOHt86OeHWZs3s4kWs1rTe4dUw0ivbcd8nQ4ZJGRlbMT6pFhHSZuDt3beTO9tFf9rEcd1hymaYgjHGUJXlGHIWH781nJ5+VgFcCHR9h7MWo0xKx5XtBuKxSpL29AyTSjoeT2GKrRl2OJYkHL5En6RjMdpEzwinNMcTPq7xfapTo9DqS+8zJPKqtMTRI3ijmR2eo5HAomnonR/3FOXqSSaTeTJyBSXzjEZEcMHTWcvJZsMXbt+iqkou3neJ5aPXmLdb06wSDUph2bZ14vBMbHDE6oZBK8GmlogiGWl9SK0lT5z3STFoKm5AJsQKRZyu0XFCSLb1iNFnMdZKFDHYPkXfK4VINMhG10pMnTVag0QPyFilMTpO3TgX21daJyGi0QKu7yGJA4mGm8GfC8T4/9jkGY4hxtOrMBzf9vNRDBuP42cUJOC9RAEyjEsL42h3HC3aqpYAWC0Ue3OaUnPj9oJ12+K8y9WTTCbzZckVlMwzmiEG33lP2/ccr1Z84dYtFkqhD/ZZq4DTCpSOI7pKU5blaIQ9vVV4MIEOv9QHBM82MVVGg+vWCKpT5UKnFkg4lQ47CIche2WYZom3n5m9ASVJOGwrIlE0DV6TeCnKgrIsCSHgwnajctIhW8Ns2FZbVIqcF0DpNF1jUprrqUrO8Nrj93LqvacXGce1R8EUGTJWkJR5ojRBg1WCmlRU5/a4vVlxvFnTWov3Ofckk8l8ebJAyTzjGYLOrHM0XcftxYLP37hBP5+iz5+jqRR9qVJVQYFWFFU5VhmAKCSSsdMNKarjTalNovUTX/jsCXy4P9GsOqlr6qLcejfUkCPLGSEQW0opO0RrijIWNn2Ie4TCKXHjvMc6G/NcUjtlEFaD3+XsRM82mM37If5fYYY20Hgs2zC44RhPPW38fgh6M2a8l0rTQlorjNIUWmNSZosHpCyYnj/PUddyaxGrJ8NSwNOvmMlkMk/kaxIoP/3TP41Sih/+4R8er2vbloceeojz58+zs7PDW97yFq5fv37mcY888ghvfvObmc1mXLp0iR/90R8dDX2ZzFfD8Ft9b6Mf5fZiwaPHx8jBPv7cHstC0RtwBnrxLNsNTgkOwRqFmCRgSJWEU36LqqpiEq1SY3Vg8Iv4tH9HVNzQG/AgA
[... several lines of base64-encoded "image/png" output data (the matplotlib figure produced by the plt.imshow cell below) removed from the notebook by this diff ...]", - "text/plain": [ - "
" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], + "outputs": [], "source": [ "plt.imshow((segm_mask[...,None]/255.*video[0,0].permute(1,2,0).cpu().numpy()/255.))" ] }, { "cell_type": "code", - "execution_count": 27, + "execution_count": null, "id": "b42dce24-7952-4660-8298-4c362d6913cf", "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "IMAGEIO FFMPEG_WRITER WARNING: input image is not divisible by macro_block_size=16, resizing from (1496, 920) to (1504, 928) to ensure video compatibility with most codecs and players. To prevent resizing, make your input image divisible by the macro_block_size or set the macro_block_size to 1 (risking incompatibility).\n", - "[swscaler @ 0x7227e40] Warning: data is not aligned! This can lead to a speed loss\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Video saved to ./videos/segm_grid.mp4\n" - ] - } - ], + "outputs": [], "source": [ "pred_tracks, pred_visibility = model(video, grid_size=grid_size, segm_mask=torch.from_numpy(segm_mask)[None, None])\n", "vis = Visualizer(\n", @@ -783,24 +678,10 @@ }, { "cell_type": "code", - "execution_count": 28, + "execution_count": null, "id": "1810440f-00f4-488a-a174-36be05949e42", "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "" - ], - "text/plain": [ - "" - ] - }, - "execution_count": 28, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "show_video(\"./videos/segm_grid.mp4\")" ] @@ -831,28 +712,17 @@ }, { "cell_type": "code", - "execution_count": 29, + "execution_count": null, "id": "379557d9-80ea-4316-91df-4da215193b41", "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "torch.Size([1, 50, 3, 720, 1296])" - ] - }, - "execution_count": 29, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "video.shape" ] }, { "cell_type": "code", - "execution_count": 30, + "execution_count": null, "id": "c6db5cc7-351d-4d9e-9b9d-3a40f05b077a", "metadata": {}, "outputs": [], @@ -871,28 +741,17 @@ }, { "cell_type": "code", - "execution_count": 31, + "execution_count": null, "id": "0918f246-5556-43b8-9f6d-88013d5a487e", "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "torch.Size([1, 50, 3, 200, 360])" - ] - }, - "execution_count": 31, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "video_interp.shape" ] }, { "cell_type": "code", - "execution_count": 32, + "execution_count": null, "id": "e4451ae5-c132-4ef2-9329-133b31305db7", "metadata": {}, "outputs": [], @@ -910,23 +769,16 @@ }, { "cell_type": "code", - "execution_count": 33, + "execution_count": null, "id": "3b852606-5229-4abd-b166-496d35da1009", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "step 0 / 4\n", - "step 1 / 4\n", - "step 2 / 4\n", - "step 3 / 4\n" - ] - } - ], + "outputs": [], "source": [ - "pred_tracks, pred_visibility = model(video_interp, grid_query_frame=grid_query_frame, backward_tracking=True)" + "import time\n", + "start_time = time.time()\n", + "pred_tracks, pred_visibility = model(video_interp, grid_query_frame=grid_query_frame, backward_tracking=True)\n", + "end_time = time.time() \n", + "print(\"Time taken: {:.2f} seconds\".format(end_time - start_time))" ] }, { @@ -939,18 +791,10 @@ }, { "cell_type": "code", - "execution_count": 36, + "execution_count": null, "id": "5394b0ba-1fc7-4843-91d5-6113a6e86bdf", "metadata": {}, - "outputs": [ - { 
- "name": "stdout", - "output_type": "stream", - "text": [ - "Video saved to ./videos/dense.mp4\n" - ] - } - ], + "outputs": [], "source": [ "vis = Visualizer(\n", " save_dir='./videos',\n", @@ -967,24 +811,10 @@ }, { "cell_type": "code", - "execution_count": 37, + "execution_count": null, "id": "9113c2ac-4d25-4ef2-8951-71a1c1be74dd", "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "" - ], - "text/plain": [ - "" - ] - }, - "execution_count": 37, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "show_video(\"./videos/dense.mp4\")" ] @@ -1014,7 +844,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.13" + "version": "3.9.19" }, "vscode": { "interpreter": {