From 451aec2911d2ea03203555d0062b4627ff3f5d13 Mon Sep 17 00:00:00 2001 From: DeutscheGabanna Date: Wed, 8 Nov 2023 21:10:53 +0100 Subject: [PATCH 01/40] unit test proposed OOP structure --- _tests/corrupted_sheet.csv | Bin 0 -> 10240 bytes _tests/dev-null | 0 _tests/test_main.py | 222 ++++++++++++++++++++++++++++++------- _tests/wrong_format.txt | 2 + _tests/wrong_format_no_ext | Bin 0 -> 10240 bytes 5 files changed, 186 insertions(+), 38 deletions(-) create mode 100644 _tests/corrupted_sheet.csv create mode 100644 _tests/dev-null create mode 100644 _tests/wrong_format.txt create mode 100644 _tests/wrong_format_no_ext diff --git a/_tests/corrupted_sheet.csv b/_tests/corrupted_sheet.csv new file mode 100644 index 0000000000000000000000000000000000000000..bb764a363273329a018bc6d6e360374a2dbdd562 GIT binary patch literal 10240 zcmV+bDF4?#Q)E-if80^E$~faEUdeQx`KymCLE6dx!@pEX$+wFNBX5{!1GDt9nV|)6 zAA{cS5-A$~5gCk@dm7hRHb72E4O?}jB8V_8w-I_(J^T5Z$gXOC3u#cftl(l{G?L!f zZV+ymD>@EzKB9_?79y|0+?)t{9XR`3a8f3bxla9P1+Z#$@WK+qx$8MWIhfa>T|I5x z_@TUzPE^9&Lns+uD0snHl)s{r@bl?`2Ri*eZ6cIYfx`H^%bV6p+gnBWv;+$keBCd) zOWhT}pqmbk-D?WicsVEt>;GdT+vNR=-=?Og<|E1PPn?!fWK&&9b%f^oYS$w=zjZja z;+LD!@+2S{v=YmO!YO3v`CN0`9?7c%_amhtnq7jmSUSOT6j<*Ha4nv|9%R^p*RS{i zy0Ei;N$X?g*8X0;mNT>euRyr;QVWhD4qHLsWl0qye9JwOg{eSOJ29%_#@&ojQl3Vl=_-~8izOHP~D@SyiI6D_#@RF9=}3aIz+JzK<(wO9A- zC)1#@0;82WJwFPA>CGIQ-LNjFw(n$WAi+UUNZ7yX+ppTX8!=5B7{t7>C*9gHSv!A9 zSI-Ba_JTf(N9?jGzg8g9Le>C^k7*)SK)#`o`Ljcf5&F6=@R)%*B+?B7^FgW&NUO-* zL#4u&A^xdqu1{jVKIUJ4Dw0jB#VGa`1hk}z^yE)3W+FeQf%bP{`TN(C=?{u6Vc%r* zU9+cPb!rZ);Tzi@MSgVlD(K+ug~yA`95U(i-qz~>lyWkEzpAb&UoP7OeKoPGtmcrV zm?|bEC3B(v2iW4oV7U}$H~mtU;ET_N;xctX2m~2Kx2V=2gU|Jzwis5UwF-B!tb8+& ze&;m^d1Qh-x6rUD&Zj_Oj{jrGl1VXwp0 z(){Ko_pg-PlD4QXF#=cA)3h`at6dEzj29kNTN4|e%|R8O$;&zQc7_$JbuBw(w*iyf6 zz);ET5!ma+^e=EVr_D4DtILeh6WJ(33~1z1N<8v_@mjA3u_Eg^A4~#{&u6Ulo&Oxv zfXukRi0)FHX@cuU@@nvDhtNV10Ap?b3KMH6`tyAUNN+W2oDUeZQAG_|&Bt7MOG0)v z-zoJ8RsdXNh(3thr^IxeSnN)jnh+O2G`zAQZ$kfR!@2cyhx1>&i=@8Fbc4XTf%=!| z+WQPV`mYnvgPCxTlv?M!O6JLOv?Yx=&JcpKA%-u2qn{zv6L96kBP-q5Mic2waY7E& z*h6pfhA6&uXqAPj-QL`guCw4EK#5gV$mc-9nM)cX%a7yeR4V2sI@1}-ro~qHusffQ zO;L2JO9+i{TOWj)V{b#Bzt-fMOr2n`@~cdk9z$n}U5I0yO?fzo5zP=TbdpG2KJR4L zep{653n_%`@+_P1ecT(|R}8^s0!SVFI~1wFP7X;}c@Ves%hT7~QATv7z>U`|^!=3} zsaZ#_z2}m;g(%|?+%IOl!5P8%oa@5wktOUj##oAoP+~A??|~LbA+!Iw_9w$im_5K4y_`_a%Pm6)^g_v$r z+I6dN4^HWTMGFxqM*Mt;BqG?knLb1sQHEynWs2IULwr*SSsJmzmjNz}2Y>t~tBPWoDXGc{Vt_tII zgBN2{gJb9#uH=Z8L(Q1=j_Qtq`JE*hw=68zb zL4LGpV%9H((O4J~b;rDdsG&5=qkQo<1Hb>mKex-Y&$=`Nt$2Nw0w1DH4JL%StUMp$ zsSFUl3w!;D(+xMV%U=`#4Xer{BiTyk7mVj`mku30i-EAEmjwq>QS(~R_qnBNnVuf5B(ic04L7}!M0!uU>2|;W0g)@@)sQN+|`{}6= zXn4MTOV3c=J$i~z@4zRTQ($eT*XI3WNR>n*?rDNQn?D}1DnIk)nUhf{TUOvM{RB|! zA^lBwGxn@R04g_`p(tk;>oI4?uN1NY8#MG$6=pY$0Q4ie@4v7AuwA<@zr;vu9oxpb z3&elhldlVa(lNdyi&|p1^3ra}!3PUN%D(KG_-~!u*%Ia<{hWxPfVj8(!1KI0UB0Fv z>Sve0mOS9E>87^vswX*Y`|r3RK)ONE(~0$>@~}nzpyI(C%A`O4ZVbZ!f zMM&gl7vU+2!??#pk5`U*$)!YK=fwwoQ%me%-QZDQ-QjunV+FzhuyUd-Dck!|!Rl$!PSNns5 zVLfG{9%@4=Z<`&Tr}v!9ZN@_ejhEAr;GVY9S0jhEoh*82JU~7yN$@rGxA}BqkFJ2Y z!0@=0kh+bBa@6Z%;$tp8;jG*=3ySp$&W5wJ<^J8|p&zHV9-j5IU01CjHBX zjZ-L_du|)WJgVuSQ*hoyQ|HYty&u-*;(O2T2goGcXa`=oSdEPP3OT1g3{9rIFs%vA%M+tqlo#i+sE$UWsw5vSAszm;#Sbh_^LU&tzPP9E6l2qwsA9% zO0kju=35$Ujk5+^VYwzive8<+Fh zdfm6obO4Rx58b~kn8)`g|AWr%;8Cql|Arw^&KJggr$AZ<>W&K^CBrV{%s_rWutlGJ zcp~{1;!CZ=(?oaJBz7{Zg5nz1Bd2ly$GZ_GAyd2Zm~bD9Ig80SU zlH{21_lei*Hw#22d88#Z>qcew9QFgW59aKMNsMh|F}~^p(oF@h0|8vEE8*c*peL@jefrCmTu2gWVI0HjdA-N4Lq)J2Gu@7ax2nmqNCUFIQQpZR^O~R#crB zDke|DuI(WtVttP5%l_`3x_ekZH?TF;UqWDs?+8}1wt-QGej5sup8X;*Fd;^4VH{?G3@r(T3SbIgurls=tm}s|1$ELEq5Ic_Wh#UBn#(a4 z_JdV#jkGZ5EY0$wrNvOUOPC1~CeqoKAAD4|5CUrAJO8lV0w&Kqd=$e!xX$|k3*?@) z2I>I86a4yRZz6m8yx=>AZplI`3EVp5QYH-zGU;j}#|>`GI4w`?f92mIHm{eOy={P0NQtKPPt)j}1sCy^eRyyu8=#eWOKO_U++DzP){{PScHi4kcUeQWZ~6)EpWiPJSugPVA0(BE$-#SsHwh0+hFbm3!p! zUG*gL+;^^IWN*oG?(7cG>L6`hm5$s(yLtFMnO4UUO^u&v&8now)NTxvhTD7eujqOf z+Vp&CJ)liVlF;%Gc|Q)b>HLnyS?k!xVOjvsV(I0uaJ{cM(&ObE*HG$k+taG;i=4PC z>gayJ#fws+Z^@qkLY(=%K2oEH&Lq>Eclm|6t8r%-nlHf=Y? z&wTq!{!tcVG!jAk7L*47j?Le+KaG){6%pg~yv{-1$E5=KC7X+XsL#T1o3owM-UBmw z7-RI1GQih%Lf5gV+-b$Sm}E9=U5Vj`dkywGA_k{ed6BjcSN{G;^8vgPN-!ZteXt^~ z+L!K~&(Tm>7ql8IJH()vJt&UNX28Yket{4Oq0iUEHLcScb`=1`7e)2mgns~R9=pRL zHa}p08|)p;=7qJ<+LguphG%T%ue>Lf=Duzjm>L0~>yW+yJUO7xl>+LI!86z>93&$V zL9vI1=Cil0Bc~RVujEIrxF(Yc+nA zs9W!zccfn9`4)ZmBUJ}?m^7z1nV%?fg(ARyql_y)KP=D^CREOq&|G^HGCcJfkh|bI zuyYd0X6Mrq2yUHp{s9@@O|jYd9Mc>r%&YJ?JQ(9y?BT~cF;)C55%kFTG8G~g(h!rk z;Hk8x=CkbNI@bOI<$Dn_hQe!ymP9s)CC4ZHev43fUFSUTh~VMuBD2wvQW}hT1~Wfs zAb08uWhhMxyLQRkXQqEZnBc@M{~tmecel~|S?w@X0LAQ~?n82om4Bn&O1cMTd;20P zlV`0sF6~VLY;~H-Dd@%@Gbpj(F43HI%;?$Q_l|mSBHcM@63M4$3s?SPj`zET7;}th zBf$?S8j~jX3I$;Dh4%k~eLy;_nsHM07e-A6ff5^?!7P_OZQ2COMnVhrE*2T)`ah9T zrUlMXY~gz%($;b3m=WdepgC)x*T1~|>Voc8qfDCh-&NM~=wZIH(0bRF+*W~nx@vi2 zyC7~yI^3rCx(Z8;-lAKBu;rpAVoC1KP~{=!?#+3d;N>O-*=~DF-pK~xZh!y~+p0SH zVb8?HlSh?gObZ=ckCH`G)p+f@`CG!;WJG})|8@08Ocv20>aLjB82ZzI)`!%EaqksZ%y9jd~>xlo(8CT{wBoEfP8;ICsiAk6%Su-pYoZRz(mo95z{QRK2svUQjX9CSxls)JGEEj%x zKeEK{fU=URe7pVkL6CiZYZS)4O#-8AR+l8`{SOVjIOSMEbIiG;{83m*hgnvLL&{fJ zanh#O%W6TZMn!J0euYF+T^j9d@-Jr?-7CQcacesn>>MQua{4RMifFsIDCCK9Jf z2-OlN7UtxQi(T&PAQ#bIrlz{UL9!YL8_&U0l6wKR0;4@SKV?{b3MsanO9w%AhFgaog*BQi#*n9Vjxi7WoXG4VGs4@I99Zs zTPH6CXUhEJ5Tag(eI+OEu~|am9Jjj{5a*{YsJwI2(ps!Uad?kxdyQw(1#LJoUC)En z{X=e=o+r;ptV4{0P1Ka12eQ+Nm@aoiN}^5M_z0RECM}FrdJ-||<9E2c_|V3E;$3x! z&oK0!J%=d@N+5Iumxs?m%X6LmaPk&br~T|8Mi0gO#dYnse8NG^a#z?wjiq0`%FiG} zEZwZ=uB*JOA=6c--p!bQF9n=ij(UmyXkZGmk7-i}YKE;P*ZS?DM?S{zxsyWAy5p^X zd0*paF!+1=DvnA3)*f3lwse9h-UOt?5s4$yS$&-LwV48+pW;?f_%rdhx8+Tx> zpV4?>YI}r3*%4USmO^vRm@T*0n+*TVZaKDa2Da8w)8=K@^M&%mHxs)!43zA9wndJW z+BJMS7SE;Oa(nv@lY7Jn>JInRv7Rf{TK6ua4PB8c&hXL5^_nnSNTmg9L`U+~9wkfl1a zL`LgeT1v3qxdonhh~_H31ful~`I=;)2Dyjl^J!Hxee_xA^lesQ7ui?Kws7GS@(V|o zh)BwpYpw2I&S#uI>LiStR}DwAYU}vG)FCqiXs$~x*9Y#iDULT|SEpO(@WZBN){vJ| zw@BxOC-e;VuxH<;bI+l0?IbndVc9S9vN;#9lKjaM{~$n_vH%L|ebL%uH4OhlJWfzwDRvLq2Ak}VKYRA*MrqTdZ6Q(u68 z&m#jpZtrT97TbL|v)*0l3n_x}79oE0HS%_ea+z9Or~bTVhsd=LP01hdAA;DS?d$6i zslViY^PvXfYdNs5RQSBiig?^hQJbQ!0Qz{wB9cTBEby1PFL+Gf4>F7E^!0Q}(kizy z40XyV)9h0vX=ce9V(CRuPpNE8P*B|g%mpE*dz`tel^rwdEB6J;7#Q7fB+KyU^L9_3 z5R(HzR>Pc@eP_zQ9&hxti%i3v*@CE$&r%pp8mlsxnjTa}yj5Po)17>Hcg3_XvI=I? z`l;nz*lPcDodu1@Cqx|yrYT4bXHTF-@r1r0hW|GS-v-BMcSHodfNuT69v)i9x02_W z>NS5=)57P*t80H$cg;(>S37Pl8)2+=*Bhhu&HA5qCN58|+oRac_nZ1D7|-|oU$}5v zKCBoQ_FW??;*=w?iRn*;``8!(;~DYfZt%G4KZ&z8r#0#Y0W)z!H8Am}7(Cz4dorop zbVUjH32l+8zWcG^?b*^wFRAO&8~ZanrLgrAcN|{<&>E=Bu1CU)}N4>({$d^s4`g-rB zAn|_?)m}Co*;ni~orYNmg-?>i_`0j3r@|pFnH9dbi>Pc$sM)k&x~9m5ZGpsWK1O!kp7y%=Q)x^Gi*(7K8yrwFaTGOTr*cPvXBe;^qs>{{ zcg7*4VjrbD#9J#`(A4fF2{k$}&|BoKq7ipaVuKagVIh;B4qH;F{Jar0V|jLXGSL|+ zR()17bv?@$Bv!{n{8JA3r^N^3VuDmcLh*{F8|i?dbF5hhv65N6*+7=DWHS#_-+>{c zXZt{xvfBz|#KT60=($>jT@SE3Y||jWSS(mQk%lQ|TxBoE-YuyWNG%mBb708s(D1TI zT@7HAhLvh~kJA0@X0K7vFWZd^e`ofABoFXlHTjbU_kQwN0-hL@i1e%P(UkTqczuJ+ zF>%>$h(TM)Iz?HtUa+6zR+*~qit2(Fxw zV1#NY=T> z^7}W2n?mHnt%$Z5_%QcEQ>YWTD)y65$^1wK*+2R!X_Y+^8aZVc!Yx_7q*o4!p`&dS$5-x1x%QcR0VA+b607ITxH0vXclTxu$PM*wx#v~k{Z4i> zsrxv9D1-C7;9{Afe_d2^-EKU=_5OH7SqFe8M_HSZ?XC*}pZ0%HgSpOnO+eoCS@PC8 zsVvu3H;QE}n{lB$jBm^SOsu?`%rPNjVY+~#IM63bVFNzv; z81U7r+S!2gWOKC|Y>yLhJ_~ZVqRjt@VCtDCCtZJDVHs?8(?v+{XzmhYpB2+qM~?x;v$s%Ou1=JR7v0Lmdt1 zN-AygoLnycx?!;!LQV^Hj7y)30&#KI_UK>O*HuUji(7vrO%vbXf3Lc8;6Y&vBGt!u zlDdTUn9Ry|#8;1G5BrGIx3GS);mz6bGc_mFg-5ejN3Dy1fub3TJ(%)o9NEB(Rg^$B zuUC((Qte@Fu=GSqMwNvxh?8L^E5d&4p|8HjUITKcN!`a0wd;!zlt-mclu8|X9Fux7 z(E6rGigU|P278$Tj%^7p9ZL$$&!C8p^t~Nf#Se?BFdJ<{E6v*w5WVq^l&y$5WHA7`3VBwwIdP7&!}!&B7y|F)`&>11y;?@ZKXM+SZ^tf#Gjrj! zez!DJN~}qE`J)2l)xgIG(iu{x+4`I&eq~D(r*v90my9fK&V{5w1pQ_yBZRu(q023A z?3{Y`7lku5L)9@tJWzy4bL(^x`1l2=K%QnNL#DemH2sDdrJ2fDl z8;BWFk?C7IzYrF&6JMZyz^qPHr*n@))j7z8jj?r z4u^N(9Q=!Djt9?hmRY_FPGcJ`YNtirTh;I;6yp212-B$wuK$+m&)P&Y$@9zt9W^Jo}!WPFGj!&jSLnEz&2)BrBYRrW(V_;h4_ zMv}Co%`xW*Q!t z0#K61U-soEbh9-H6B^cff9C$n`KP0OS*>Iy`^55TF@LYP)h93t2bgng2y%6$v|M<& z>~px79vGtxFz9Es;Z*1bjwajax%~qkW;f0-4Fjl>X{P4kBD>4H;Rc|Zdgt#5|%MHM$HQ% z&X48V({y{KG9IysOo>BU5i~na3CFDAA9ebaW#E|8i+VBk0y7bTjR>ch+})rg~K_wVH7T99%KtVI9G&jL1EBE0EU)gD{~(Ngiuk7a4sAhqz3n`vg<^{ zs>+zElvL0`V6w&~YY0=ADm<}}%_WxS{8()74FAaX9D3^sJP+`(0}V__zMJB}sG+sOD9ofUrr zZZgBq|axd~MakQygV5m<^%CmxRCkqkO zqx9PlMpD-ti~{f;e~Ej6`&|nyL>Yv7^jsC*C*7omfwO*<13R>a8zSQ63}LLGW#6<+ zBaOoAmxQTY6T2^Wo7y*Gj+RJ6=yXmc6rVNG_ z9`RCl>wp^VjpnDQ$)pc<>8r2(_t7{RP0{nO$IPS94q>wcQMBZ=|n_~~h5s#Q~Rcse)_ zL7{L0Ocp5sBYxQq%1;`($+BBICJcO;yxnxJKqWHqgDy5@-adiiiyVswE*fDFHuh}= z3zv)e$z|e)eB4=Omi_ha{lQVoAZ!7|a%S+X`_*_QSmAH`FPuRtDG2Q>(R~>hRnsFP zxTliU4IB`(LA)Gdz8zEHz>*e$uj`#PguJ}ga5#)?;p?Kq15_Ku-GY}6RXju3FH5^1 zX>Mq!lO#AY{Ll4kwLaIe3_$BNpcp`RmJB0LI)Jl18F-rvdlwia|SjpZNOGy&P%o<2EwN}2jMF2(!>+rQ>G G;Dg#3RTTgL literal 0 HcmV?d00001 diff --git a/_tests/dev-null b/_tests/dev-null new file mode 100644 index 0000000..e69de29 diff --git a/_tests/test_main.py b/_tests/test_main.py index 1502415..8018ad8 100644 --- a/_tests/test_main.py +++ b/_tests/test_main.py @@ -1,44 +1,190 @@ -"""Test cases for main.py""" +"""Unit tests for Daylio-Obsidian parser""" import unittest -import shutil # to delete temporary files -import subprocess # to run main.py -import os # to make temporary directories and join them - -FILES = [ - "2022-10-25.md", - "2022-10-26.md", - "2022-10-27.md", - "2022-10-30.md" - ] -TEST_DIR = os.path.join(os.getcwd(), "_tests") -COMPUTED = os.path.join(TEST_DIR, "computed_results") -EXPECTED = os.path.join(TEST_DIR, "expected_results") - -# TODO: maybe E2E testing be more integrated into workflow? you could launch main.py directly there -# run commands like cmp -s file1.txt file.txt afterwards to check if they're ok - -class TestScript(unittest.TestCase): - """Tests ../main.py""" +from handlers import Librarian +from handlers import DayNotInJournalError +from handlers import InvalidDateFormatError +from handlers import EntryNotInJournalError + +class TestLibrarian(unittest.TestCase): + """ + Tests the Librarian handler-class of the journal. + The Librarian is responsible for parsing files and outputing the final journal. + We use internal class methods to check proper handling of data throughout the process. + """ def setUp(self): - """Create temporary directory for test output files""" - if not os.path.exists(COMPUTED): - os.mkdir(COMPUTED) - - def test_main(self): - """Gives ../main.py a CSV file and checks whether it is identical to the expected_results""" - if not subprocess.run(["python", "main.py", os.path.join(TEST_DIR, "testing_sheet.csv"), COMPUTED], check=True): - self.fail() - - # Open each computed file and check if it's equal to the expected one - for file in FILES: - with open(os.path.join(EXPECTED, file), encoding="UTF-8") as expected_file, open(os.path.join(COMPUTED, file), encoding="UTF-8") as computed_file: - self.assertListEqual(list(expected_file), list(computed_file)) - - def tearDown(self): - if os.path.isdir(COMPUTED): - shutil.rmtree(COMPUTED) + self.lib = Librarian().passMoods("moods.json").passFile("_tests/testing_sheet.csv") + + def test_no_moods(self): + """ + When passed a file to parse, Librarian obj should inform about: + - no moods.json file if it hasn't been provided + TODO: Librarian should pass a test if passMoods() was called before passFile() + """ + with self.assertLogs('runtime', level="INFO"): + Librarian().passFile("_tests/testing_sheet.csv") + + def test_bad_files(self): + """Pass some faulty files at the librarian and see if it throws correct erorrs""" + # TODO: maybe generate corrupted_sheet and wrong_format during runner setup in workflow mode? + # dd if=/dev/urandom of="$corrupted_file" bs=1024 count=10 + # generates random bytes and writes them into a given file + self.assertRaises(ValueError, Librarian().passFile("_tests/corrupted_sheet.csv")) + self.assertRaises(TypeError, Librarian().passFile("_tests/wrong_format.txt")) + self.assertRaises(TypeError, Librarian().passFile("_tests/wrong_format_no_ext")) + self.assertRaises(FileNotFoundError, Librarian().passFile("_tests/missing_file.csv")) + # TODO: make this file locked during runner workflow with chmod 600 + self.assertRaises(PermissionError, Librarian().passFile("_tests/locked-dir/locked_file.csv")) + + def access(self, date, dated_entry = None): + """ Alias to detach actual object names and their methods from repetitive test calls""" + if dated_entry is None: + obj = self.lib.accessDate(date) else: - raise FileNotFoundError(f"{COMPUTED} is missing at teardown.") + obj = self.lib.accessDate(date).accessDatedEntry(dated_entry) + return obj + + def test_is_day_filed(self): + """ + accessDate() should: + - return True if lib contains Date obj, and return obj + - return False if lib does not contain Date obj, and return empty obj + - throw ValueError if the string does not follow day format + """ + self.assertTrue(self.access("2022-10-25")) + self.assertTrue(self.access("2022-10-26")) + self.assertTrue(self.access("2022-10-27")) + self.assertTrue(self.access("2022-10-30")) + + # TODO: __bool__ method should be falsy if the object has no child DatedEntry + self.assertFalse(self.access("2022-10-21")) + self.assertFalse(self.access("2022-10-20")) + self.assertFalse(self.access("2017-10-20")) + self.assertFalse(self.access("1819-10-20")) + + self.assertRaises(ValueError, self.lib.access("ABC")) + self.assertRaises(ValueError, self.lib.access("2022")) + self.assertRaises(ValueError, self.lib.access("1999-1-1")) + self.assertRaises(ValueError, self.lib.access("12:00 AM")) + + def test_is_entry_filed(self): + """ + accessDate().accessDatedEntry() should: + - return True if lib contains DatedEntry obj, and return obj + - return False if lib does not contain DatedEntry obj, and return empty obj + - throw ValueError if the strings do not follow day & time format + """ + d = "2022-10-25" + self.assertTrue(self.lib.access(d, "11:36 PM")) + self.assertTrue(self.lib.access(d, "11:40 PM")) + self.assertTrue(self.lib.access(d, "5:00 PM")) + self.assertFalse(self.lib.access(d, "12:00 AM")) + + d = "2022-10-26" + self.assertTrue(self.lib.access(d, "10:00 PM")) + self.assertTrue(self.lib.access(d, "8:00 PM")) + self.assertTrue(self.lib.access(d, "7:30 PM")) + self.assertTrue(self.lib.access(d, "1:00 PM")) + self.assertTrue(self.lib.access(d, "9:00 AM")) + self.assertTrue(self.lib.access(d, "7:50 AM")) + self.assertFalse(self.lib.access(d, "2:05 PM")) + + d = "2022-10-27" + self.assertTrue(self.lib.access(d, "1:49 PM")) + self.assertTrue(self.lib.access(d, "12:00 AM")) + self.assertFalse(self.lib.access(d, "2:59 AM")) + + d = "2022-10-30" + self.assertTrue(self.lib.access(d, "10:04 AM")) + self.assertFalse(self.lib.access(d, "11:09 AM")) + + self.assertRaises(ValueError, self.lib.access("2022-1", "12:00 AM")) # wrong day format + self.assertRaises(ValueError, self.lib.access("2022-1", "2: AM")) # both formats wrong + self.assertRaises(ValueError, self.lib.access("WHAT", "IS GOING ON")) + self.assertRaises(ValueError, self.lib.access("/ASDFVDJU\\", "%#")) + + def test_getters(self): + """ + getX() retrieves a given property of the Date/DatedEntry object. It should: + - be equal to the expected value if we query an existing DatedEntry + - throw DatedEntryNotFoundError if we query an empty DatedEntry obj + """ + d = "2022-10-25" + note = "Nulla vel risus eget magna lacinia aliquam ac in arcu." + self.assertEqual(self.access(d, "11:36 PM").getMood(), "hungry") + self.assertEqual(self.access(d, "11:36 PM").getNote(), note) + self.assertNotEqual(self.access(d, "5:00 PM").getMood(), "rad") + + d = "2022-10-26" + note = "QYuip." + self.assertEqual(self.access(d, "10:00 PM").getMood(), "captivated") + self.assertNotEqual(self.access(d, "8:00 PM").getNote(), note) + self.assertNotEqual(self.access(d, "7:30 PM").getMood(), "blissful") + + d = "2022-10-27" + activities = [ "allegro", "working-remotely" ] + self.assertEqual(self.access(d, "12:00 AM").getMood(), "fatigued") + self.assertListEqual(self.access(d, "12:00 AM").getActivites(), activities) + self.assertNotEqual(self.access(d, "1:49 PM").getMood(), "guilty") + self.assertNotEqual(self.access(d, "1:49 PM").getActivites(), activities) + + d = "2022-10-30" + self.assertEqual(self.lib.accessDate(d, "10:04 AM").getMood(), "vaguely ok") + self.assertNotEqual(self.lib.accessDate(d, "10:04 AM").getMood(), "captivated") + + self.assertRaises(DatedEntryNotFoundError, self.access("2022-10-25", "1:13 PM").getMood(), "rad") + self.assertRaises(DatedEntryNotFoundError, self.access("1999-05-26", "11:15 M").getMood(), "blissful") + self.assertRaises(DatedEntryNotFoundError, self.access("2022-10-27", "3:51 PM").getMood(), "guilty") + self.assertRaises(ValueError, self.access("2022-10-26", "29:04 AM").getMood(), "captivated") + + # Try to query a property of DatedEntry (e.g. mood) in a Date object + self.assertRaises(AttributeError, self.access(d).getMood()) + self.assertRaises(AttributeError, self.access(d).getActivites()) + + # Reverse - try to query a property of Date in a DatedEntry object + self.assertRaises(AttributeError, self.access(d, "10:04 AM").getBoilerplate()) + + def testOutputBoilerplate(self): + """ + outputBoilerplate() is normally a private method, but we want to check if: + - it throws an error when trying to output without destination set + - it throws an error when invoked from a missing Date + - it throws an error when invoked in DatedEntry obj instead of Date obj + - it returns True when succeeds + """ + d = "2022-10-26" + # at this point the destination dir has not been set in Librarian obj + self.assertRaises(UnknownDestinationError, self.access(d).outputBoilerplate()) + # now we correctly set it to output to pwd + self.assertTrue(self.lib.setOutput("_tests/").outputBoilerplate()) + self.assertRaises(DatedEntryNotFoundError, self.access("1999-09-25").outputBoilerplate()) + self.assertRaises(DatedEntryNotFoundError, self.access(d, "7:30 AM").outputBoilerplate()) + + def testOutputPartial(self): + """ + output() for DatedEntry obj should output its contents into lib destination + Actual comparison between outputted and expected text at github-workflow stage + """ + d = "2022-10-26" + self.assertTrue(self.lib.accessDate(d).accessDatedEntry("7:30 PM").output()) + self.assertTrue(self.lib.accessDate(d).accessDatedEntry("8:00 PM").output()) + + def testOutputFullDate(self): + """ + output() for Date obj should output boilerplate & all its DatedEntry children + Actual comparison between outputted and expected text at github-workflow stage + """ + d = "2022-10-26" + self.assertTrue(self.lib.accessDate(d).output()) + + def testOutputAll(self): + """ + output() for Librarian obj should output all Date children + Actual comparison between outputted and expected text at github-workflow stage + """ + self.assertTrue(self.lib.output("_tests/output/")) + self.assertRaises(NotADirectoryError, self.lib.setOutput("_tests/dev-null").output()) + self.assertRaises(FileNotFoundError, self.lib.setOutput("_tests/it/does/not/exist").output()) + self.assertRaises(PermissionError, self.lib.setOutput("_tests/locked-dir").output()) # is this run as a main program, not component? if __name__ == '__main__': diff --git a/_tests/wrong_format.txt b/_tests/wrong_format.txt new file mode 100644 index 0000000..ccb1d35 --- /dev/null +++ b/_tests/wrong_format.txt @@ -0,0 +1,2 @@ +this is just a text file +ok, nothing to see here \ No newline at end of file diff --git a/_tests/wrong_format_no_ext b/_tests/wrong_format_no_ext new file mode 100644 index 0000000000000000000000000000000000000000..44f254ff99f153ec7bf39a7dd7b633a5ce07f709 GIT binary patch literal 10240 zcmV+bDF4^s1v%nWWYvvbUIX;+Y%?z2dI}u`$ki zhy}W#mn|6EM1lE|(L45@bRqxFJN!ln>#vR3#gr+q2_ZStmy@HMU=$gyruM7w_=xP! zT}(ePdv1Q~*MV4VA;U8xYWUNO9kky;1w!&8atHi#53^iXTtWMrSSk6QeN1UQO7zUF z&$-Y%WZX_qoA5f8e|4^u|1SNck5pt$PFC?rA~;vPk&AfQb4e5??x*${4}Kn>%^$z0 z8f@p`k1R*jX%tXci?(wEVvs7FMt(+Ijl=W0&oST{VgDDed;^(*)5S>Q1GAv59drIx-sM_yPbIR$VP?0Q!d;uuPD{C9hD&Y*(9sqtDavM5!^ z2RsG)Rq1%3$*jhs?z<#)*_Dc+J={!SH26j^1^E2x{YTKM{|>509;-N_Bo2wZhcehF zgS=B%ED;(ZUN|MA6k}l|X%W=!lQBtZR=ZG$`6JlA=@pNdUN%x~1_A}q+xu2Qx2JBr z=~aO)I~2KSfjy&Bm1o`!^Z)$3} zNGJY*b9by4LyaL6Pre{lAqbA>3gwQtrr-9Qih|FkEkdfDxVjCmE`#6+*QrVDXauCr z_X{_Dmjo&x(C)S_r-jI-~=@xO^QUQIbzXV*2`BY5O)SA$h$2M z7}%$*iez5{#^tx!X`v_NQ_C~>3F0UfloRyd=C!V($m!X1IleiUEWKzSa<7>5Kmbr( zHoBUT@Md~e%HT&et0m$3Lsx;4JW&N0fBK_n4@7?I=4wwjOpm`LhjJ6stG{L(Z)u#} z#%ZOS+6W}O2}h@(O9hSgq`8yy3Onnyp|N=zUc3gnBy*`?5jV}@)wvMc8u+_}qz#h8 zsYA3asANwTwEB_sB0fVS8||*mv=vx-)>!D!;+Y2DwH0zACwX_kW>dguAK5KwwQO0D z9I-f4S~+Sp7iF>6Ril2Ymp;!9z^gXu4^skW9{;_n%uGbt7O>#6lulx0qLLN!Dd4q+ zJ){GiKo8cBd1gz}({>JhMCZ{SoZKf!;|6GRht2)EJpfO z7l5~3u1E|asq0hY;pEg^@0|6Bh(IL{nFVaMk11KYvQVC{Wsl^J0Gs`q3ZJhF%xnFS z7gvUmu|Hb_eex4LCwWoq1^8K_fjhbhh}hC#9pjCA(Z$d&X(J}!XeN1>&G6MoNv zi~{PwKb_7P%a|;m&$!DMp+$FB;K10|6ZMnh6gKOlSamh* z>U14{yI?MH?z&`N8uIUWcIs*|HX)}4>euo@~5n#HjF-@C`FXhUA zzx6a9J|akf9fFKsG;#j!*_CkN!2~H0mow5Li2kY*xLv474$xpR{AQe&ILdFf83c1& z*;1-*E)!mB31btLvwU3UGC*ETe9X#oyMK zX7>)$7a966Mq;hXR4~cluO@I^n4$;-qpNs!psu+B)B3=Gfz;alTX^Jm%Au|a&Tm6hx zGVPn>tfT64W8@lYo4o!vJO~Er4fP8@|C9O+d}KeHHcG9K)3BzB#s4|D9%>e%Cj_S@ zkoSSD-N6}GyLz_qEP6(yr9&MiNFXJwA}{IbV!9HLNKpLI`09<_>DfptKW z(CRH1N4ZxX(e9PLbIg5}i5B{O3yiLneoEot>;{zxh(MFJr|C#p(!fx9QSF}V^_(?8 z+hHm6o7Fz{CqUt~`g)}}CCh1;k5YstK$yFl=Ao~C#u78XC0Vx2xPk>1t>F9O(sltB z*4C3|PHOAz)Zh^>c65%xV6EM>AFqlKgT}zT^p!Pbyi!T*DGQMO!V~@yp#50*YK~@_ zSUVsuX=DJ*PK>b4Gn+XvIdHbzugXl}FsR#oDt)*(z-I#qQ)!98f(wb((Iku(q+;Jq z$zsXL?fT`L@U%t@-O%ZR(!KW|1)p0bz*IP1g-h)gXe@LT=T#3PoWHrIC1cxlw_(pK z=$?+tz58W5$Go?ny8WUUd#o2{nj3snlzv*30fPBA#a!20jEce87TAf?W!GVVZerY@w?%(=O5$rxif5vj;qTMX_^awlwFb80qhi8 zk-%uBBMP`o&Exuyk`#}`_XGg0vX9Qy)@cX~XqAsir&c)L@D#$h2KpuT>w9jOJr%o#d!BQepje3bUsP=G_e{!U-$% zGj=V@wv#~-2=+MbuC7J+T})kQY{Zx936knMA0#$IV^kPBaUBsblQw^WTy55bwn7@_ zLGE%!+fb4c`1T|hJ{b)Ero}KH#gI-W%Fy)D0}hFPu6p&^p2mu~KSA1=!zV^`fBS0E za7eRcM>y};1D`=ti^SzTC#Jj>s8bW{I&M$j7m4Ibrau*cK8IL<~^!`?yz_)l&KI#;aiH)`F9g2P`oYy0FhR_HKR0f``Pj+qCK&cV$=y zCU@D5w%Y(yA(VtluD^XGh|;@gaL6~TzY-B_B4tg#1~o7s)WU zeH`s;1!z`)zd}*MKwuxIkwg7-yc>M9;nQhT_ISs(uzWV=dw^>KlZK?0s@-wb$tMiq zL!$NL9y8Ebs0WaR-f3BmMkhRK*oCHE#V?dQt?0~kD`~V%sykfvhRlD|w5;fwAuQ6F zP_+@t*hymvIttpB8YYg2aU8)~s= zG`uvuuTeHHsH1E z+=BwBFAK%ksfkFBya^mCXNKOBN z>im(h$VOnL^FzWdT?4%po%4oNee%F16-ssM0`DM~6qEkY2NTz`jkj!ruoE%>9#x-@ zZ0R+Z{FE~!7Rr4j(<-MY@uX;@|EL0@w`KS4-N>+JlYX;Cna*Mb$W1TX$v5^MRG_xl zC2u6z!+50F$;NTc>k))PaF9W=}svWV9HedgJp%Xsf9$M0;NycV3^QiB=%OG z9r~kiu*JKDTF*+88YyHYGy?gAkSbEL-JkE%;u#YUg7q!YR``66#pdCYlu{O8^=4S0 z1NbdwT)TK@P{HUDPfI?5O#Wti>FkbZti_<4z5thnu=h5^8x$q$>`VHxPnC8Eiw>eg zZ*PxQ@cAa=;e-C)3#|b_ga42hL1TLHwpH`@T_Z zS+|@dg%!Zl-0?cao3%}Q8Pupt@vk3K?)66|ooMVV`zvaHxS~1pRbkzL>-Mi;N~m82 zO*B^v{`YaVQ&+_#6~X0osyO zbOIvhK3*-!flvQ-xFc@yZANsd?CLUeGm*4dQz*G71|?HZvt+s=26%=x#sYw!%Ww$k z;xjz`9fPdF=~uJDal`56T%A)r+$N2yp|?MhH+kMPLJH5MUli0Yvrhs5O57lC6n^{q z3B{YkFOzaR*C(Ic?7b0!^)YNOH-rQSb-J z;);Mp+QyG4#jyJ`lAjg1Q4^Xh|EUo%Qp-bLuY!#F#?`0)m?+30O(5$Uic#upypAY- zBMnx!l8bG+4m5D}wVze)mX0t<9)l15t89A39Kr&=G0S8t<~DU~-=l#A+taFjMzY2U z|G~QeYsIdbYR5sU_v(ni{F4@2Z*=BP)mfwdm;ST4K8SXo!{_~ClT=CHDZ^q!+7GQ` zQ=s~ZS-_qzeQyvdU{{^t_A;{g=d}r$@!YFUM+R)*Y$5=%BBZ0@6#rt)El{aGF$|Z( zUfHlY5xYCLa(Ap?N;vq2#_?)bXS`CxkfjzCRk7y>Kk02MNHDaaxlV+zfu&SbqGDCM z`9E;;bCWx|TWJ+kO)$-FEYlmPd{-O07fwE0*JH%}fo^*j3BwC4*m}F+6LGw7GVCAf z12Yrl$^{|r&CrYg;t)I1$ImziNzdS%yyy>$dP_UZ*hpz*jI9-40j#x(=EO%Zy%KT> z0~^IJm2t1_Qn3wZSi0b9pnxbrZO{foUpGlTLPR~jxP$xb_Dx3`qgnPTGcpy?VsHqn z-rK@t>f-WvZH*f_215|&grCvnzntG>-nZ*UjZ7z+tjaYsP{OR^m>j}6MuDIH5-VPX zS||5nTW%3$S3sP3)wcGbSoF>PnT{>ElU_) zJOeK;Dq}t`A&`m(T?VeKlXC`;FVt#uqGTaWObeySaXa_~IZ84`+EGk%q_dbttm7+If-OMR!m2@2(-Qbgtzh#8-(?xBsK~ar$ z?Yq$JfvC5s-V*P)D-ML2rETN(->nmW``h-k87as&P|M*1Ai2M$#jy8mj#)M+<32SK z56DFPn^BN76GeN-Gov=M^b&(b6uKP|yJ;Ka93oMQxIWo#Jtdx^Qj&s1c=YxK{5?Ep z*~OO?ezY#;tUvJCmJZ@?eh6(UzF}bPo%v;Oc(7)ZzdiY+Zve_~#M){0S zsxx(OU&pIGcKNQpm<|gnKLS!9=q?f(={+SFXrab1*nx1liywpO3TsSO{dj1!T*W3o z5QiV{4c{L6PeEqfBcE`9lNk1}p~XXNNlU`zd#((1q;-s;!p%oM!4=8SNy5xHSf^-V zIG@)TZ$Y`0D+Cj`1D9T6-%iO}vL3ey6RYYS zOg0bT6Tr}wOpa+&`#9GPSI&%Q(9ii@!WFUQ7jXvmGB3kD`g|%q862jq<;fg47ylUi zofQHmYw4d+nc@|+ybsg)w!4ajAJJtQ$EH^Wh~MRm8-Je)2dNp0sN|?mjl8T?-?lN6 z^L$H#tfwWh_D&%`ewIZekg5*YS_c7xc}GYyFYmrqBbIqqx%h2B<4^`FKA0f@QcWmd zAgH1#cdTrn3UrLT0?VWm^M&LPb&s_x`&Zojrk!H}?aEzlfTM&0`2PYWV6c0Mlm8F9 z9X0ffQfOT6%et@u`GyUYoR~9U6TfJVCh1f!GS2Ecd36rH|qDc z4=;5(LMfN4tu|Qn`$48_AK{Y-X!9Vq!50qor* zSH%9SaN-)*6E&=)`aqd7x1{r2%N^?R)CW>KKb6s^aUw>4oJP5<)$C14D|;Bl?!!maAe?6HlW&Onhr!zmiqcKnIr7n z)iQ4$mZrUvBF-D-ZG)kW+ia4YY{sz?sr>Qj)X3Dz73B8T3mXD%ETE)^9pafILn6?v z1pv-XfSTByN5hL+Qa_B*3*Z@O+#TFcQ)UPN3;H9QR!9F-imvLnU=)=)^?j6)4!(a6 z*CHPx{Zm*pjz1dAJ}rqQ2p68uFWB617%4SqWB7)RO{&c>)x}({dk1~lL|v-7GcsPT zDG6o!3m_m2h28IXlmn9y83Xbx=}ywK;ZjE~MpCegA)f$E5F~8mmKCvIlIff}x?@CL zh_$k7zYf#zQ5hgo6RUtt(Q}J8Kmck$v13Sf^1X-@8$cAVR=)bJj+$4Z- z%iMKoAy8?`r&%fi;$7*>a%+yIW-So5$=d|rgF`WIkeDlu#lq1P;D-%_P3Zw3C+_Z;{j312?+m=urX{x z8KE2mef;g4(?MNRorr$u62y?p%4a_|6V+an9Wx5}X9cZQ_Ef{(#vcS}vqgDQ>pKG@ zw_I5dJCH)?s@z{)LiXxhlJRz7T2`on>aCqd_?&~9hGg^WXOasHy4#QteL!uu5cT~I zuf&qRMd8nzf+NL=^z{rJc|kF&Klc=W=4HAFyN!Azz;VxWVLogHC06qQ(>3UcBMVo1 z`k;;j-RgNd#JyOfvmj{+gBRWsn3oa%Y7ovIYx%RQBQG;lyD1b|zrzUT2mqG1VR5A3 zC@hG4gIObv8sUlVhL6Zv6c=r!p_kJ&)|ZMj zT$g1nPo;h}J>bhNWbdp%9Zli$DhcWpfLHTxB_!pX`6ocIy}Juqe1*7SFp?bb=69b( z+D}Edj4libL&KR9=ON})@S?z;r3s;{tX@dh*n6es7`ZEIeGk}=^#Ic>z$m>+vqtaq ztcPD4zBhPb=OA~cBKuP^5xWY}x@9|leYP2qVeFRW^MhMdLgDe4l6?IwU2-eimL!Ch7s2RrfsVx{bt<0pse>>;g@3l~Q|Cj7B20(vQ4PXCRa5v;{`Cc1V13ACqKO2o zB9-Mjw%p%R+s!B*1{osIjsAZ|MIrH3y<&SI42Ou$fY8@n)a3z3PHz9>i|I$Pz;#xR zLIE}n&)IdMzSQ3zG0{=%g^}GTjK#_Y9JLA3Iw0Ywpyjxr%L&Z$0yb*55G6D^H3_75 z@OH=YFa}dNFpXxDFnD&0#A7w&O9uJ=`$nN*<64lmcT zFJz?>Ym2_uef6$rOFa)QNbvFnxprIc(e0nm?rC8MvX^4tuHq6vY9V$d4oF<|o-!#} zD18pGTOJti+dl!KWXS&qaDyM6^+eEJAT|F88!#>-CVB8$xvv_*5WFNBn{tzzA(;gX zJM|{pjC6#ty`Gz0lFs)fEQ*TaZ`&Kz$hZKR%kULTdA+U2;RzVc@wu=x)_V1r2;N z$m_qmEP>h)e-1ddj!L9fqImPai0hn->*kSJvh0aXz~C}$7e}B8dUIL_u}R~m?g@H? zop84NUNMN>=skuW)U`@>d~8x7artY!3dL#RBtQ2Grkg<3ehT|g5KFk za-O9iJ7P4{+Ai78n4PX$LGm6lOmtJN+g}sbVJDeTojdQ2FUGN(&NDvCUkSRB_96Xp zwN=hNzg`j-r*731(FLqH(1(dMV$*ZdoCgxdu) zN~AV;gc`GZOfR(G(PE&GZ($1b-`CeasaxnAMPQcpI`g~NEPf~qag#p3)!Fuw`RNRl zNDRPd5U0*^VI-ozzyvt4S={cy{oup-Lq>I<0ONAzyLk zh8f3g^f2C%(u}E9j>K!Q)cnhG?F7l4@?D{lzkUuEaD(nI-A@r66U*=LDcgdW#Y%94 zQya{&obCkNKx;#=yZ8$TPP5eK!7e6!6(IA~#@nq~-N*%p@x$C&-2poqDwTf{NsFbNNvRkQY0C^Dn9Sj-sF7T`NVb0qt_ zSlKmZM2dHKc*5xDAS{mYI6;=ta|ms@Wc%J&->Y`;>v}p|iF~``F>4I^w2oH9z@-W! zm!~ntICSNPUhAiKC9DS@r2}=XG10WLUj#}u6KmR9xKTKfsFnfvGUkbFm@z@!%WpUZ zg2Jn>bBbrqf%pi3(iw?RfyAg+JQftmhJ#?wF{6y)W|5DJwEjnGoxp)T*i^hup93>xd5$m+P z@6Qnu&n60bD`1muW-aLOQcMnijZ(|ddhz5i*3AY&lumwRnFnwnM$ti_}%Y7~g`z{<5ZcqKd z$HtyIOj$^G?+?p&yAD$MPH$EUKVQXM2C=xEyI{Q(S4Rw`B0^&{r7#u!m3sn zYDv^fRfxtAMo&8tPRYBELBJ^X^aC=WTap>P)7X*3;)ajZqt=H33$n}zl|LHikcTdE zh&!CnEZOW`=fj<=)@pdPOI`$Aa@U>$IZqw=K2P9uc<#)6pWnpDkhy@icsLaCjbjFrZZ%j7HJKEPVvlAmZ8Nu+;U^Q` z-)bM>E8arWPocpmUs`P!!D7<@n1~)9Ek}-(00zeVT{E5#ozVtLp3K<<(P)FGGOH~V zMShY7-qP7z?~=&U)2e~*l`Sp7QlmrR`>cNMOK(_}eh zCTkxcdiP2eWxSPiS+97i!_cFCI~yF|CabX#VOIU?OxjRf^=YNQ)~32riwrAxnu&?! zlDw*&^#C%Z)JW|7_QZQ_Z}w6oAkyFrs5p7IcOl5<7z7kd(kcV| z&u!VwrH)WMgAvOH@*xPM3tfR`edmN(^8h%1Vo{gzi0asx%a_oL@xuo6gt zI$q})v~tE324!5NN`H!YC`L$%6f%VCjuJG*(kB3Oi2U(!tL6&Bx%FZYHFeQQk14D+ z4|d@!qSn-=HtR+zSchiZuj(m-d%fD$>9b>v2|w#Seh4vswg5{n{xcidpNCaIy5tj^ zQgmOSu-&-~ncSIqvWu*O#yRcSDAJRxQTf6)^W!g|9&^ncFFXH+sk9 zu+l5o2`pJU-3>)h{s2f|Z+w zvDl?yQZJD}ab!g!s|YA(R`Y0T!7=^d;taSA%m5!fmhzSTsOL&9)^5fMcoS|YGu4d} GyvfpC8{ljJ literal 0 HcmV?d00001 From 6bcf6afe1624234bacd1b5a8016b9e9288b85a83 Mon Sep 17 00:00:00 2001 From: DeutscheGabanna Date: Wed, 8 Nov 2023 21:17:16 +0100 Subject: [PATCH 02/40] chmod changed to 744 -R --- .github/workflows/pylint.yaml | 0 .github/workflows/test.yaml | 0 .gitignore | 0 .vscode/launch.json | 0 LICENSE | 0 README.md | 0 _tests/corrupted_sheet.csv | Bin _tests/dev-null | 0 _tests/expected_results/2022-10-25.md | 0 _tests/expected_results/2022-10-26.md | 0 _tests/expected_results/2022-10-27.md | 0 _tests/expected_results/2022-10-30.md | 0 _tests/locked-dir/locked_file.csv | 2 ++ _tests/test_main.py | 0 _tests/testing_sheet.csv | 0 _tests/wrong_format.txt | 0 _tests/wrong_format_no_ext | Bin cmd_args.py | 0 compile_md.py | 0 load_moods.py | 0 main.py | 0 moods.json | 0 parse_csv.py | 0 utils.py | 0 write_md.py | 0 25 files changed, 2 insertions(+) mode change 100644 => 100755 .github/workflows/pylint.yaml mode change 100644 => 100755 .github/workflows/test.yaml mode change 100644 => 100755 .gitignore mode change 100644 => 100755 .vscode/launch.json mode change 100644 => 100755 LICENSE mode change 100644 => 100755 README.md mode change 100644 => 100755 _tests/corrupted_sheet.csv mode change 100644 => 100755 _tests/dev-null mode change 100644 => 100755 _tests/expected_results/2022-10-25.md mode change 100644 => 100755 _tests/expected_results/2022-10-26.md mode change 100644 => 100755 _tests/expected_results/2022-10-27.md mode change 100644 => 100755 _tests/expected_results/2022-10-30.md create mode 100755 _tests/locked-dir/locked_file.csv mode change 100644 => 100755 _tests/test_main.py mode change 100644 => 100755 _tests/testing_sheet.csv mode change 100644 => 100755 _tests/wrong_format.txt mode change 100644 => 100755 _tests/wrong_format_no_ext mode change 100644 => 100755 cmd_args.py mode change 100644 => 100755 compile_md.py mode change 100644 => 100755 load_moods.py mode change 100644 => 100755 main.py mode change 100644 => 100755 moods.json mode change 100644 => 100755 parse_csv.py mode change 100644 => 100755 utils.py mode change 100644 => 100755 write_md.py diff --git a/.github/workflows/pylint.yaml b/.github/workflows/pylint.yaml old mode 100644 new mode 100755 diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml old mode 100644 new mode 100755 diff --git a/.gitignore b/.gitignore old mode 100644 new mode 100755 diff --git a/.vscode/launch.json b/.vscode/launch.json old mode 100644 new mode 100755 diff --git a/LICENSE b/LICENSE old mode 100644 new mode 100755 diff --git a/README.md b/README.md old mode 100644 new mode 100755 diff --git a/_tests/corrupted_sheet.csv b/_tests/corrupted_sheet.csv old mode 100644 new mode 100755 diff --git a/_tests/dev-null b/_tests/dev-null old mode 100644 new mode 100755 diff --git a/_tests/expected_results/2022-10-25.md b/_tests/expected_results/2022-10-25.md old mode 100644 new mode 100755 diff --git a/_tests/expected_results/2022-10-26.md b/_tests/expected_results/2022-10-26.md old mode 100644 new mode 100755 diff --git a/_tests/expected_results/2022-10-27.md b/_tests/expected_results/2022-10-27.md old mode 100644 new mode 100755 diff --git a/_tests/expected_results/2022-10-30.md b/_tests/expected_results/2022-10-30.md old mode 100644 new mode 100755 diff --git a/_tests/locked-dir/locked_file.csv b/_tests/locked-dir/locked_file.csv new file mode 100755 index 0000000..644809c --- /dev/null +++ b/_tests/locked-dir/locked_file.csv @@ -0,0 +1,2 @@ +one,two,three,four,five +six,seven,eight,nine,ten diff --git a/_tests/test_main.py b/_tests/test_main.py old mode 100644 new mode 100755 diff --git a/_tests/testing_sheet.csv b/_tests/testing_sheet.csv old mode 100644 new mode 100755 diff --git a/_tests/wrong_format.txt b/_tests/wrong_format.txt old mode 100644 new mode 100755 diff --git a/_tests/wrong_format_no_ext b/_tests/wrong_format_no_ext old mode 100644 new mode 100755 diff --git a/cmd_args.py b/cmd_args.py old mode 100644 new mode 100755 diff --git a/compile_md.py b/compile_md.py old mode 100644 new mode 100755 diff --git a/load_moods.py b/load_moods.py old mode 100644 new mode 100755 diff --git a/main.py b/main.py old mode 100644 new mode 100755 diff --git a/moods.json b/moods.json old mode 100644 new mode 100755 diff --git a/parse_csv.py b/parse_csv.py old mode 100644 new mode 100755 diff --git a/utils.py b/utils.py old mode 100644 new mode 100755 diff --git a/write_md.py b/write_md.py old mode 100644 new mode 100755 From 846ff66a4ed71513ead23a0f88838fec969e0458 Mon Sep 17 00:00:00 2001 From: DeutscheGabanna Date: Wed, 22 Nov 2023 19:50:17 +0100 Subject: [PATCH 03/40] total OOP overhaul with unittests all the way --- .gitignore | 4 +- .idea/.gitignore | 3 + .idea/Obsidian-Daylio-Parser.iml | 19 ++ .../inspectionProfiles/profiles_settings.xml | 6 + .idea/misc.xml | 4 + .idea/modules.xml | 8 + .idea/vcs.xml | 6 + .vscode/launch.json | 2 +- _tests/{testing_sheet.csv => empty_sheet.csv} | 0 _tests/hack.py | 10 + _tests/incomplete-moods.json | 21 ++ _tests/sheet-1-valid-data.csv | 13 ++ ..._sheet.csv => sheet-2-corrupted-bytes.csv} | Bin ...ng_format.txt => sheet-3-wrong-format.txt} | 0 ...ong_format_no_ext => sheet-4-no-extension} | Bin _tests/test_dated_entries_group.py | 126 ++++++++++++ _tests/test_dated_entry.py | 32 +++ _tests/test_librarian.py | 74 +++++++ _tests/test_main.py | 191 ------------------ _tests/test_utils.py | 27 +++ cmd_args.py | 67 ------ compile_md.py | 14 +- config.py | 109 ++++++++++ dated_entries_group.py | 89 ++++++++ dated_entry.py | 146 +++++++++++++ errors.py | 33 +++ librarian.py | 173 ++++++++++++++++ load_moods.py | 8 - main.py | 56 ++--- parse_csv.py | 52 ----- utils.py | 56 +++-- write_md.py | 2 +- 32 files changed, 967 insertions(+), 384 deletions(-) create mode 100644 .idea/.gitignore create mode 100644 .idea/Obsidian-Daylio-Parser.iml create mode 100644 .idea/inspectionProfiles/profiles_settings.xml create mode 100644 .idea/misc.xml create mode 100644 .idea/modules.xml create mode 100644 .idea/vcs.xml rename _tests/{testing_sheet.csv => empty_sheet.csv} (100%) create mode 100644 _tests/hack.py create mode 100755 _tests/incomplete-moods.json create mode 100755 _tests/sheet-1-valid-data.csv rename _tests/{corrupted_sheet.csv => sheet-2-corrupted-bytes.csv} (100%) rename _tests/{wrong_format.txt => sheet-3-wrong-format.txt} (100%) rename _tests/{wrong_format_no_ext => sheet-4-no-extension} (100%) create mode 100644 _tests/test_dated_entries_group.py create mode 100644 _tests/test_dated_entry.py create mode 100644 _tests/test_librarian.py delete mode 100755 _tests/test_main.py create mode 100644 _tests/test_utils.py delete mode 100755 cmd_args.py create mode 100755 config.py create mode 100644 dated_entries_group.py create mode 100644 dated_entry.py create mode 100644 errors.py create mode 100644 librarian.py delete mode 100755 load_moods.py delete mode 100755 parse_csv.py diff --git a/.gitignore b/.gitignore index d8f93eb..606714d 100755 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,4 @@ __pycache__/ -_tests/computed_results \ No newline at end of file +_tests/debug.log +/_tests/output-results/ +/debug.log diff --git a/.idea/.gitignore b/.idea/.gitignore new file mode 100644 index 0000000..26d3352 --- /dev/null +++ b/.idea/.gitignore @@ -0,0 +1,3 @@ +# Default ignored files +/shelf/ +/workspace.xml diff --git a/.idea/Obsidian-Daylio-Parser.iml b/.idea/Obsidian-Daylio-Parser.iml new file mode 100644 index 0000000..9ee7bc1 --- /dev/null +++ b/.idea/Obsidian-Daylio-Parser.iml @@ -0,0 +1,19 @@ + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/.idea/inspectionProfiles/profiles_settings.xml b/.idea/inspectionProfiles/profiles_settings.xml new file mode 100644 index 0000000..105ce2d --- /dev/null +++ b/.idea/inspectionProfiles/profiles_settings.xml @@ -0,0 +1,6 @@ + + + + \ No newline at end of file diff --git a/.idea/misc.xml b/.idea/misc.xml new file mode 100644 index 0000000..a971a2c --- /dev/null +++ b/.idea/misc.xml @@ -0,0 +1,4 @@ + + + + \ No newline at end of file diff --git a/.idea/modules.xml b/.idea/modules.xml new file mode 100644 index 0000000..77d5656 --- /dev/null +++ b/.idea/modules.xml @@ -0,0 +1,8 @@ + + + + + + + + \ No newline at end of file diff --git a/.idea/vcs.xml b/.idea/vcs.xml new file mode 100644 index 0000000..35eb1dd --- /dev/null +++ b/.idea/vcs.xml @@ -0,0 +1,6 @@ + + + + + + \ No newline at end of file diff --git a/.vscode/launch.json b/.vscode/launch.json index c554f0d..87d7d0d 100755 --- a/.vscode/launch.json +++ b/.vscode/launch.json @@ -12,7 +12,7 @@ "console": "integratedTerminal", "justMyCode": true // "args": [ - // "./_tests/testing_sheet.csv", + // "./_tests/sheet-1-valid-data.csv", // "~/Daylio export/" // ] } diff --git a/_tests/testing_sheet.csv b/_tests/empty_sheet.csv similarity index 100% rename from _tests/testing_sheet.csv rename to _tests/empty_sheet.csv diff --git a/_tests/hack.py b/_tests/hack.py new file mode 100644 index 0000000..59d9c2f --- /dev/null +++ b/_tests/hack.py @@ -0,0 +1,10 @@ +import sys + +print(sys.argv) + +# very dirty way to work around PyCharm's inability to pass arguments to test runner +# this problem only occurs if you run scripts in Python tests conf +sys.argv.append("_tests/sheet-1-valid.data.csv") +sys.argv.append("_tests/output-results/") + +print(sys.argv) \ No newline at end of file diff --git a/_tests/incomplete-moods.json b/_tests/incomplete-moods.json new file mode 100755 index 0000000..441ab91 --- /dev/null +++ b/_tests/incomplete-moods.json @@ -0,0 +1,21 @@ +{ + "rad": [ + "rad", + "blissful" + ], + "neutral": [ + "vaguely ok", + "a bit helpless" + ], + "bad": [ + "vaguely bad", + "helpless", + "misunderstood" + ], + "awful": [ + "vaguely awful", + "hollow", + "trapped", + "dying of pain" + ] +} diff --git a/_tests/sheet-1-valid-data.csv b/_tests/sheet-1-valid-data.csv new file mode 100755 index 0000000..a021e19 --- /dev/null +++ b/_tests/sheet-1-valid-data.csv @@ -0,0 +1,13 @@ +full_date,date,weekday,time,mood,activities,note_title,note +2022-10-30,October 30,Sunday,10:04 AM,vaguely ok,2ćities skylines | dó#lóó fa$$s_ą%,"Dolomet","Lorem ipsum sit dolomet amęt." +2022-10-27,October 27,Thursday,1:49 PM,vaguely good,chess,"Cras pretium","Lorem ipsum dolor sit amet, consectetur adipiscing elit." +2022-10-27,October 27,Thursday,12:00 AM,fatigued,allegro | working remotely,"Suspendisse sit amet","Phaśellus pharetra justo ac dui lacinia ullamcorper." +2022-10-26,October 26,Wednesday,10:00 PM,captivated,at the office | board game | colleague interaction | big social gathering,,"Sed ut est interdum","Maecenas dictum augue in nibh pellentesque porttitor." +2022-10-26,October 26,Wednesday,8:00 PM,tired,allegro | at the office | board game | colleague interaction | big social gathering,"Mauris rutrum diam","Quisque dictum odio quis augue consectetur, at convallis żodio aliquam." +2022-10-26,October 26,Wednesday,7:30 PM,grateful,allegro | at the office | acknowledged efforts | colleague interaction,"Aliquam nec sem semper","Nulla aćcumsan sem sit amet lectus pretium, ac interdum tellus porta." +2022-10-26,October 26,Wednesday,1:00 PM,blissful,allegro | at the office,"Vestibulum sagittis leo eu sodales","Ut et elit id lectus hendrerit ełementum quis auctor ipsum." +2022-10-26,October 26,Wednesday,9:00 AM,in awe,allegro | at the office | outdoors | notable event,"Integer elementum","Nunc lobortis enim eu nisi ultrices, sit amet sagittis lacus venenatis." +2022-10-26,October 26,Wednesday,7:50 AM,lifeless,podcast | politics | world event,"Nulla quis lectus pulvinar","Etiam commódo enim ut orci varius viverra." +2022-10-25,October 25,Tuesday,11:36 PM,hungry,allegro | working remotely | colleague interaction,"Mauris vitae nunc vel arcu consequat auctor","Nulla vel risus eget magna lacinia aliquam ac in arcu." +2022-10-25,October 25,Tuesday,11:40 PM,rad,,,Uet nulla nunc lobortis quisque. +2022-10-25,October 25,Tuesday,5:00 PM,vaguely ok,,, \ No newline at end of file diff --git a/_tests/corrupted_sheet.csv b/_tests/sheet-2-corrupted-bytes.csv similarity index 100% rename from _tests/corrupted_sheet.csv rename to _tests/sheet-2-corrupted-bytes.csv diff --git a/_tests/wrong_format.txt b/_tests/sheet-3-wrong-format.txt similarity index 100% rename from _tests/wrong_format.txt rename to _tests/sheet-3-wrong-format.txt diff --git a/_tests/wrong_format_no_ext b/_tests/sheet-4-no-extension similarity index 100% rename from _tests/wrong_format_no_ext rename to _tests/sheet-4-no-extension diff --git a/_tests/test_dated_entries_group.py b/_tests/test_dated_entries_group.py new file mode 100644 index 0000000..d5b2628 --- /dev/null +++ b/_tests/test_dated_entries_group.py @@ -0,0 +1,126 @@ +from unittest import TestCase +from dated_entries_group import DatedEntriesGroup + + +class TestDate(TestCase): + def setUp(self): + self.sample_date = DatedEntriesGroup("2011-10-10") + + def test_get_date(self): + self.assertEqual(DatedEntriesGroup("2023-10-15").get_uid(), "2023-10-15") + self.assertEqual(DatedEntriesGroup("2019-5-9").get_uid(), "2019-5-9") + self.assertEqual(DatedEntriesGroup("2023-11-25").get_uid(), "2023-11-25") + + self.assertRaises(ValueError, DatedEntriesGroup, "00-") + self.assertRaises(ValueError, DatedEntriesGroup, "2199-32-32") + + # Test cases with unconventional date formats + self.assertRaises(ValueError, DatedEntriesGroup, "2022/05/18") # Invalid separator + self.assertRaises(ValueError, DatedEntriesGroup, "2023_07_12") # Invalid separator + self.assertRaises(ValueError, DatedEntriesGroup, "1999.10.25") # Invalid separator + + # Test cases with random characters in the date string + self.assertRaises(ValueError, DatedEntriesGroup, "2@#0$2-05-18") # Special characters in the year + self.assertRaises(ValueError, DatedEntriesGroup, "1987-0%4-12") # Special characters in the month + self.assertRaises(ValueError, DatedEntriesGroup, "2001-07-3*") # Special characters in the day + + # Test cases with excessive spaces + self.assertRaises(ValueError, DatedEntriesGroup, " 2022-05-18 ") # Spaces around the date + self.assertRaises(ValueError, DatedEntriesGroup, "1999- 10-25") # Space after the month + self.assertRaises(ValueError, DatedEntriesGroup, " 2000-04 - 12 ") # Spaces within the date + + # Test cases with mixed characters and numbers + self.assertRaises(ValueError, DatedEntriesGroup, "2k20-05-18") # Non-numeric characters in the year + self.assertRaises(ValueError, DatedEntriesGroup, "1999-0ne-25") # Non-numeric characters in the month + self.assertRaises(ValueError, DatedEntriesGroup, "2021-07-Two") # Non-numeric characters in the day + + # Test cases with missing parts of the date + self.assertRaises(ValueError, DatedEntriesGroup, "2022-05") # Missing day + self.assertRaises(ValueError, DatedEntriesGroup, "1987-09") # Missing day + self.assertRaises(ValueError, DatedEntriesGroup, "2001") # Missing month and day + self.assertRaises(ValueError, DatedEntriesGroup, "") # Empty string + + def test_access_dated_entry(self): + """ + Difference between access_dated_entry(time) and get_known_dated_entries[time]: + - former will create missing entries, if time is valid + - latter will raise KeyError if the entry is missing, even if time is valid + - former will raise ValueError if time is invalid + - latter will raise KeyError if time is invalid + """ + self.assertEqual(self.sample_date.access_dated_entry("10:00 AM").get_uid(), "10:00 AM") + self.assertEqual(self.sample_date.access_dated_entry("9:30 PM").get_uid(), "9:30 PM") + + # Test cases for 12-hour format + self.assertRaises(ValueError, self.sample_date.access_dated_entry, "2: AM") # Invalid format + self.assertRaises(ValueError, self.sample_date.access_dated_entry, "15:45 PM") # Invalid hour (more than 12) + self.assertRaises(ValueError, self.sample_date.access_dated_entry, "11:30 XM") # Invalid meridiem indicator + + # Test cases for 24-hour format + self.assertRaises(ValueError, self.sample_date.access_dated_entry, "25:15") # Invalid hour (more than 24) + self.assertRaises(ValueError, self.sample_date.access_dated_entry, "14:45 PM") + self.assertRaises(ValueError, self.sample_date.access_dated_entry, + "03:20 XM") # Invalid meridiem indicator in 24-hour format + + # Test cases with invalid characters + # noinspection SpellCheckingInspection + self.assertRaises(ValueError, self.sample_date.access_dated_entry, "/ASDFVDJU\\") # Invalid characters + + # Test cases with incomplete time information + self.assertRaises(ValueError, self.sample_date.access_dated_entry, "2022-1") # Incomplete time information + self.assertRaises(ValueError, self.sample_date.access_dated_entry, "12:") # Incomplete time information + self.assertRaises(ValueError, self.sample_date.access_dated_entry, ":30") # Incomplete time information + + def test_get_known_dated_entries(self): + """ + Difference between access_dated_entry(time) and get_known_dated_entries[time]: + - former will create missing entries, if time is valid + - latter will raise KeyError if the entry is missing, even if time is valid + - former will raise ValueError if time is invalid + - latter will raise KeyError if time is invalid + """ + self.sample_date.access_dated_entry("11:11") + self.sample_date.access_dated_entry("12:12") + self.sample_date.access_dated_entry("13:13") + + self.assertEqual(self.sample_date.get_known_dated_entries()["11:11"].get_uid(), "11:11") + self.assertEqual(self.sample_date.get_known_dated_entries()["12:12"].get_uid(), "12:12") + self.assertEqual(self.sample_date.get_known_dated_entries()["13:13"].get_uid(), "13:13") + + self.assertRaises(KeyError, lambda: self.sample_date.get_known_dated_entries()["23:00"]) + self.assertRaises(KeyError, lambda: self.sample_date.get_known_dated_entries()["9:30 PM"]) + self.assertRaises(KeyError, lambda: self.sample_date.get_known_dated_entries()["11:50 AM"]) + + def test_truthiness_of_dated_entries_group(self): + """ + DatedEntriesGroup should be truthy if it has a valid UID and has any known entries. + """ + self.sample_date.access_dated_entry("11:11") + self.sample_date.access_dated_entry("12:12") + self.sample_date.access_dated_entry("13:13") + + self.assertTrue(self.sample_date) + + def test_falseness_of_dated_entries_group(self): + """ + DatedEntriesGroup should be falsy if it has a valid UID but no known entries. + """ + self.assertFalse(self.sample_date) + + def test_no_duplicate_entries_created(self): + """ + DatedEntriesGroup should return the already existing entry if it is known, instead of creating a duplicate. + """ + obj = self.sample_date.access_dated_entry("11:11") + obj.set_note("I already exist, see?") + + self.assertEqual(self.sample_date.access_dated_entry("11:11").get_note(), obj.get_note()) + + def test_retrieve_known_entries(self): + obj1 = self.sample_date.access_dated_entry("11:11") + obj2 = self.sample_date.access_dated_entry("12:12") + obj3 = self.sample_date.access_dated_entry("13:13") + + self.assertEqual(self.sample_date.get_known_dated_entries()["11:11"], obj1) + self.assertEqual(self.sample_date.get_known_dated_entries()["12:12"], obj2) + self.assertEqual(self.sample_date.get_known_dated_entries()["13:13"], obj3) diff --git a/_tests/test_dated_entry.py b/_tests/test_dated_entry.py new file mode 100644 index 0000000..8a46bdd --- /dev/null +++ b/_tests/test_dated_entry.py @@ -0,0 +1,32 @@ +from unittest import TestCase +import hack +import dated_entry + + +class Test(TestCase): + def test_slice_quotes(self): + self.assertEqual(dated_entry.slice_quotes("\"test\""), "test") + self.assertEqual(dated_entry.slice_quotes("\"\""), "") + + def test_is_time_valid(self): + # Valid time formats + self.assertTrue(dated_entry.Time("1:49 AM")) + self.assertTrue(dated_entry.Time("02:15 AM")) + self.assertTrue(dated_entry.Time("12:00 PM")) + self.assertTrue(dated_entry.Time("6:30 PM")) + self.assertTrue(dated_entry.Time("9:45 PM")) + self.assertTrue(dated_entry.Time("00:00 AM")) + self.assertTrue(dated_entry.Time("12:00 AM")) + self.assertTrue(dated_entry.Time("13:30")) + self.assertTrue(dated_entry.Time("9:45")) + + # Invalid time formats + self.assertRaises(ValueError, dated_entry.Time, "okk:oksdf s") + self.assertRaises(ValueError, dated_entry.Time, "14:59 AM") + self.assertRaises(ValueError, dated_entry.Time, "25:00 AM") + self.assertRaises(ValueError, dated_entry.Time, "26:10") + self.assertRaises(ValueError, dated_entry.Time, "12:60 PM") + self.assertRaises(ValueError, dated_entry.Time, "12:00 XX") + self.assertRaises(ValueError, dated_entry.Time, "abc:def AM") + self.assertRaises(ValueError, dated_entry.Time, "24:00 PM") + self.assertRaises(ValueError, dated_entry.Time, "00:61 AM") diff --git a/_tests/test_librarian.py b/_tests/test_librarian.py new file mode 100644 index 0000000..2bea20d --- /dev/null +++ b/_tests/test_librarian.py @@ -0,0 +1,74 @@ +import csv +import json +from unittest import TestCase +from librarian import Librarian + + +class TestLibrarian(TestCase): + """ + Tests the Librarian handler-class of the journal. + The Librarian is responsible for parsing files and outputting the final journal. + We use internal class methods to check proper handling of data throughout the process. + """ + def setUp(self): + self.lib = Librarian( + path_to_file="sheet-1-valid-data.csv", + path_to_moods="moods.json" + ) + + def test_set_custom_moods(self): + # assertTrue is not needed, because it would have already failed at setUp() + self.assertFalse(Librarian("sheet-2-corrupted-bytes.csv").has_custom_moods()) + self.assertFalse(Librarian("sheet-3-wrong-format.txt").has_custom_moods()) + self.assertFalse(Librarian("sheet-4-no-extension.csv").has_custom_moods()) + self.assertFalse(Librarian("incomplete-moods.json").has_custom_moods()) + + def test_pass_file(self): + """ + Pass some faulty files at the librarian and see if it exists. + There is no point in continuing the script if a crucial CSV file is faulty. + """ + # TODO: maybe generate corrupted_sheet and wrong_format during runner setup in workflow mode? + # dd if=/dev/urandom of="$corrupted_file" bs=1024 count=10 + # generates random bytes and writes them into a given file + self.assertRaises(csv.Error, Librarian, "sheet-2-corrupted-bytes.csv") + self.assertRaises(csv.Error, Librarian, "sheet-3-wrong-format.txt") + self.assertRaises(csv.Error, Librarian, "sheet-4-no-extension") + self.assertRaises(FileNotFoundError, Librarian, "sheet-5-missing-file.csv") + self.assertRaises(StopIteration, Librarian, "sheet-6-empty-file.csv") + # TODO: make this file locked during runner workflow with chmod 600 + self.assertRaises(PermissionError, Librarian, "locked-dir/locked_file.csv") + + def test_access_date(self): + """ + accessDate() should: + - return True if lib contains Date obj, and return obj + - return False if lib does not contain Date obj, and return empty obj + - throw ValueError if the string does not follow day format + """ + # obj is truthy if it has uid and at least one child DatedEntry (debatable) + self.assertTrue(self.lib.access_date("2022-10-25")) + self.assertTrue(self.lib.access_date("2022-10-26")) + self.assertTrue(self.lib.access_date("2022-10-27")) + self.assertTrue(self.lib.access_date("2022-10-30")) + + # obj is falsy if the object has no child DatedEntry (debatable) + self.assertRaises(FileNotFoundError, self.lib.access_date, "2022-10-21") + self.assertRaises(FileNotFoundError, self.lib.access_date, "2022-10-20") + self.assertRaises(FileNotFoundError, self.lib.access_date, "2017-10-20") + self.assertRaises(FileNotFoundError, self.lib.access_date, "1819-10-20") + + self.assertRaises(ValueError, self.lib.access_date, "ABC") + self.assertRaises(ValueError, self.lib.access_date, "2022") + self.assertRaises(ValueError, self.lib.access_date, "1999-1-1") + self.assertRaises(ValueError, self.lib.access_date, "12:00 AM") + + def test_has_custom_moods(self): + self.assertTrue(self.lib.has_custom_moods()) + self.assertFalse(Librarian("sheet-1-valid-data.csv")) + self.assertRaises(json.JSONDecodeError, Librarian, "sheet-1-valid-data.csv", "empty_sheet.csv") + self.assertRaises(FileNotFoundError, Librarian, "sheet-1-valid-data.csv", "missing-file.json") + self.assertRaises(PermissionError, Librarian, "sheet-1-valid-data.csv", "locked-dir/locked_file.csv") + self.assertRaises(KeyError, Librarian, "sheet-1-valid-data.csv", "incomplete-moods.json") + + diff --git a/_tests/test_main.py b/_tests/test_main.py deleted file mode 100755 index 8018ad8..0000000 --- a/_tests/test_main.py +++ /dev/null @@ -1,191 +0,0 @@ -"""Unit tests for Daylio-Obsidian parser""" -import unittest -from handlers import Librarian -from handlers import DayNotInJournalError -from handlers import InvalidDateFormatError -from handlers import EntryNotInJournalError - -class TestLibrarian(unittest.TestCase): - """ - Tests the Librarian handler-class of the journal. - The Librarian is responsible for parsing files and outputing the final journal. - We use internal class methods to check proper handling of data throughout the process. - """ - def setUp(self): - self.lib = Librarian().passMoods("moods.json").passFile("_tests/testing_sheet.csv") - - def test_no_moods(self): - """ - When passed a file to parse, Librarian obj should inform about: - - no moods.json file if it hasn't been provided - TODO: Librarian should pass a test if passMoods() was called before passFile() - """ - with self.assertLogs('runtime', level="INFO"): - Librarian().passFile("_tests/testing_sheet.csv") - - def test_bad_files(self): - """Pass some faulty files at the librarian and see if it throws correct erorrs""" - # TODO: maybe generate corrupted_sheet and wrong_format during runner setup in workflow mode? - # dd if=/dev/urandom of="$corrupted_file" bs=1024 count=10 - # generates random bytes and writes them into a given file - self.assertRaises(ValueError, Librarian().passFile("_tests/corrupted_sheet.csv")) - self.assertRaises(TypeError, Librarian().passFile("_tests/wrong_format.txt")) - self.assertRaises(TypeError, Librarian().passFile("_tests/wrong_format_no_ext")) - self.assertRaises(FileNotFoundError, Librarian().passFile("_tests/missing_file.csv")) - # TODO: make this file locked during runner workflow with chmod 600 - self.assertRaises(PermissionError, Librarian().passFile("_tests/locked-dir/locked_file.csv")) - - def access(self, date, dated_entry = None): - """ Alias to detach actual object names and their methods from repetitive test calls""" - if dated_entry is None: - obj = self.lib.accessDate(date) - else: - obj = self.lib.accessDate(date).accessDatedEntry(dated_entry) - return obj - - def test_is_day_filed(self): - """ - accessDate() should: - - return True if lib contains Date obj, and return obj - - return False if lib does not contain Date obj, and return empty obj - - throw ValueError if the string does not follow day format - """ - self.assertTrue(self.access("2022-10-25")) - self.assertTrue(self.access("2022-10-26")) - self.assertTrue(self.access("2022-10-27")) - self.assertTrue(self.access("2022-10-30")) - - # TODO: __bool__ method should be falsy if the object has no child DatedEntry - self.assertFalse(self.access("2022-10-21")) - self.assertFalse(self.access("2022-10-20")) - self.assertFalse(self.access("2017-10-20")) - self.assertFalse(self.access("1819-10-20")) - - self.assertRaises(ValueError, self.lib.access("ABC")) - self.assertRaises(ValueError, self.lib.access("2022")) - self.assertRaises(ValueError, self.lib.access("1999-1-1")) - self.assertRaises(ValueError, self.lib.access("12:00 AM")) - - def test_is_entry_filed(self): - """ - accessDate().accessDatedEntry() should: - - return True if lib contains DatedEntry obj, and return obj - - return False if lib does not contain DatedEntry obj, and return empty obj - - throw ValueError if the strings do not follow day & time format - """ - d = "2022-10-25" - self.assertTrue(self.lib.access(d, "11:36 PM")) - self.assertTrue(self.lib.access(d, "11:40 PM")) - self.assertTrue(self.lib.access(d, "5:00 PM")) - self.assertFalse(self.lib.access(d, "12:00 AM")) - - d = "2022-10-26" - self.assertTrue(self.lib.access(d, "10:00 PM")) - self.assertTrue(self.lib.access(d, "8:00 PM")) - self.assertTrue(self.lib.access(d, "7:30 PM")) - self.assertTrue(self.lib.access(d, "1:00 PM")) - self.assertTrue(self.lib.access(d, "9:00 AM")) - self.assertTrue(self.lib.access(d, "7:50 AM")) - self.assertFalse(self.lib.access(d, "2:05 PM")) - - d = "2022-10-27" - self.assertTrue(self.lib.access(d, "1:49 PM")) - self.assertTrue(self.lib.access(d, "12:00 AM")) - self.assertFalse(self.lib.access(d, "2:59 AM")) - - d = "2022-10-30" - self.assertTrue(self.lib.access(d, "10:04 AM")) - self.assertFalse(self.lib.access(d, "11:09 AM")) - - self.assertRaises(ValueError, self.lib.access("2022-1", "12:00 AM")) # wrong day format - self.assertRaises(ValueError, self.lib.access("2022-1", "2: AM")) # both formats wrong - self.assertRaises(ValueError, self.lib.access("WHAT", "IS GOING ON")) - self.assertRaises(ValueError, self.lib.access("/ASDFVDJU\\", "%#")) - - def test_getters(self): - """ - getX() retrieves a given property of the Date/DatedEntry object. It should: - - be equal to the expected value if we query an existing DatedEntry - - throw DatedEntryNotFoundError if we query an empty DatedEntry obj - """ - d = "2022-10-25" - note = "Nulla vel risus eget magna lacinia aliquam ac in arcu." - self.assertEqual(self.access(d, "11:36 PM").getMood(), "hungry") - self.assertEqual(self.access(d, "11:36 PM").getNote(), note) - self.assertNotEqual(self.access(d, "5:00 PM").getMood(), "rad") - - d = "2022-10-26" - note = "QYuip." - self.assertEqual(self.access(d, "10:00 PM").getMood(), "captivated") - self.assertNotEqual(self.access(d, "8:00 PM").getNote(), note) - self.assertNotEqual(self.access(d, "7:30 PM").getMood(), "blissful") - - d = "2022-10-27" - activities = [ "allegro", "working-remotely" ] - self.assertEqual(self.access(d, "12:00 AM").getMood(), "fatigued") - self.assertListEqual(self.access(d, "12:00 AM").getActivites(), activities) - self.assertNotEqual(self.access(d, "1:49 PM").getMood(), "guilty") - self.assertNotEqual(self.access(d, "1:49 PM").getActivites(), activities) - - d = "2022-10-30" - self.assertEqual(self.lib.accessDate(d, "10:04 AM").getMood(), "vaguely ok") - self.assertNotEqual(self.lib.accessDate(d, "10:04 AM").getMood(), "captivated") - - self.assertRaises(DatedEntryNotFoundError, self.access("2022-10-25", "1:13 PM").getMood(), "rad") - self.assertRaises(DatedEntryNotFoundError, self.access("1999-05-26", "11:15 M").getMood(), "blissful") - self.assertRaises(DatedEntryNotFoundError, self.access("2022-10-27", "3:51 PM").getMood(), "guilty") - self.assertRaises(ValueError, self.access("2022-10-26", "29:04 AM").getMood(), "captivated") - - # Try to query a property of DatedEntry (e.g. mood) in a Date object - self.assertRaises(AttributeError, self.access(d).getMood()) - self.assertRaises(AttributeError, self.access(d).getActivites()) - - # Reverse - try to query a property of Date in a DatedEntry object - self.assertRaises(AttributeError, self.access(d, "10:04 AM").getBoilerplate()) - - def testOutputBoilerplate(self): - """ - outputBoilerplate() is normally a private method, but we want to check if: - - it throws an error when trying to output without destination set - - it throws an error when invoked from a missing Date - - it throws an error when invoked in DatedEntry obj instead of Date obj - - it returns True when succeeds - """ - d = "2022-10-26" - # at this point the destination dir has not been set in Librarian obj - self.assertRaises(UnknownDestinationError, self.access(d).outputBoilerplate()) - # now we correctly set it to output to pwd - self.assertTrue(self.lib.setOutput("_tests/").outputBoilerplate()) - self.assertRaises(DatedEntryNotFoundError, self.access("1999-09-25").outputBoilerplate()) - self.assertRaises(DatedEntryNotFoundError, self.access(d, "7:30 AM").outputBoilerplate()) - - def testOutputPartial(self): - """ - output() for DatedEntry obj should output its contents into lib destination - Actual comparison between outputted and expected text at github-workflow stage - """ - d = "2022-10-26" - self.assertTrue(self.lib.accessDate(d).accessDatedEntry("7:30 PM").output()) - self.assertTrue(self.lib.accessDate(d).accessDatedEntry("8:00 PM").output()) - - def testOutputFullDate(self): - """ - output() for Date obj should output boilerplate & all its DatedEntry children - Actual comparison between outputted and expected text at github-workflow stage - """ - d = "2022-10-26" - self.assertTrue(self.lib.accessDate(d).output()) - - def testOutputAll(self): - """ - output() for Librarian obj should output all Date children - Actual comparison between outputted and expected text at github-workflow stage - """ - self.assertTrue(self.lib.output("_tests/output/")) - self.assertRaises(NotADirectoryError, self.lib.setOutput("_tests/dev-null").output()) - self.assertRaises(FileNotFoundError, self.lib.setOutput("_tests/it/does/not/exist").output()) - self.assertRaises(PermissionError, self.lib.setOutput("_tests/locked-dir").output()) - -# is this run as a main program, not component? -if __name__ == '__main__': - unittest.main(argv=["first-arg-is-ignored"]) diff --git a/_tests/test_utils.py b/_tests/test_utils.py new file mode 100644 index 0000000..aa4446f --- /dev/null +++ b/_tests/test_utils.py @@ -0,0 +1,27 @@ +import os.path +from unittest import TestCase +import logging +import utils + + +class TestUtils(TestCase): + def test_slugify(self): + # no need to check if slug is a valid tag + self.assertEqual(utils.slugify("ConvertThis to-------a SLUG", False), "convertthis-to-a-slug") + self.assertEqual(utils.slugify("Zażółć gęślą jaźń ", False), "zażółć-gęślą-jaźń") + self.assertEqual(utils.slugify(" Multiple spaces between words", False), "multiple-spaces-between-words") + self.assertEqual(utils.slugify("Хлеба нашего повшеднего", False), "хлеба-нашего-повшеднего") + + # check if the slug is a valid tag + with self.assertLogs(logging.getLogger("utils"), logging.WARNING): + utils.slugify("1. Digit cannot appear at the beginning of a tag", True) + + with self.assertNoLogs(logging.getLogger("utils"), logging.WARNING): + utils.slugify("Digits within the string 1234 - are ok", True) + + with self.assertNoLogs(logging.getLogger("utils"), logging.WARNING): + utils.slugify("Digits at the end of the string are also ok 456", True) + + def test_expand_path(self): + self.assertEqual(utils.expand_path("$HOME/whatever"), "/home/deutschegabanna/whatever") + self.assertEqual(utils.expand_path('~'), "/home/deutschegabanna") diff --git a/cmd_args.py b/cmd_args.py deleted file mode 100755 index 1ed3023..0000000 --- a/cmd_args.py +++ /dev/null @@ -1,67 +0,0 @@ -"""Sets up all necessary options and arguments for main.py""" -import argparse - -parser = argparse.ArgumentParser( - prog="Daylio to Obsidian Parser", - description="Converts Daylio .CSV backup file into Markdown files for Obsidian.", - epilog="Created by DeutscheGabanna" -) -parser.add_argument( - "filepath", - type=str, - help="Specify path to the .CSV file" -) -parser.add_argument( - "destination", - help="Path to output finished files into." -) -parser.add_argument( - "--prefix", # YYYY-MM-DD.md so remember about a separating char - default='', - help="Prepends a given string to each entry's header." -) -parser.add_argument( - "--suffix", # YYYY-MM-DD.md so remember about a separating char - default='', - help="Appends a given string at the end of each entry's header." -) -parser.add_argument( - "--tag_activities", "-a", - action="store_true", - help="Tries to convert activities into valid tags.", - dest="ACTIVITIES_AS_TAGS" -) -parser.add_argument( - "-colour", "--color", - action="store_true", - help="Prepends a colour emoji to each entry depending on mood.", - dest="colour" -) -parser.add_argument('--version', action='version', version='%(prog)s 2.0') -parser.add_argument( - "--header", - type=int, - default=2, - help="Headings level for each entry.", - dest="HEADER_LEVEL" -) -parser.add_argument( - "--tags", - help="Tags in the YAML metamood_to_check of each note.", - default="daily", - dest="TAGS" -) -# TODO: Force-argument does nothing yet. -parser.add_argument( - "--force", - choices=["accept", "refuse"], - help="Skips user confirmation when overwriting files. Either force 'accept' (DANGEROUS!) or 'refuse' all requests." -) -parser.add_argument( - "--csv-delimiter", - default="|", - help="Set delimiter for activities in Daylio .CSV, e.g: football | chess" -) -# TODO: User should be able to set verbosity level in logging - -settings = parser.parse_args() diff --git a/compile_md.py b/compile_md.py index 085ff2c..cf53047 100755 --- a/compile_md.py +++ b/compile_md.py @@ -14,7 +14,7 @@ import logging from functools import reduce # Other -from cmd_args import settings +from config import settings import load_moods import utils @@ -57,14 +57,4 @@ def compile_entry_contents(entry): final_output += "\n\n" return final_output -def get_colour(mood_to_check): - """Prepend appropriate colour for the mood passed in mood_to_check""" - prepended_colour = "" - mood_colour=["🟣","🟢","🔵","🟠","🔴"] # 0 - best, 4 - worst mood group - if settings.colour: - for i, (_, this_group) in enumerate(load_moods.available_moods.items()): - if mood_to_check in this_group: - prepended_colour = f"{mood_colour[i]} " - if not prepended_colour: - logging.warning("%s was not found in moods.json database.", mood_to_check) - return prepended_colour + diff --git a/config.py b/config.py new file mode 100755 index 0000000..e089e73 --- /dev/null +++ b/config.py @@ -0,0 +1,109 @@ +""" +Sets up all necessary options and arguments. +Librarian: +├── filepath +├── destination +└── force + +DatedGroup: +└── tags + +DatedEntries: +├── prefix (output) +├── suffix (output) +├── activities_as_tags (creation) +├── csv_delimiter +├── colour +└── header + +You can either: +* config.get_defaults() to avoid any complains from argparse during testing +* create_parser().parse_args() if you're coding for an actual user scenario +The second option will require the program be started in a console environment with properly passed arguments. +""" +import argparse + + +def get_defaults(): + """ + Use this method to avoid any complains from argparse during testing by spoon-feeding two crucial arguments. + """ + return create_parser().parse_args( + ['sheet-1-valid-data.csv', '/output-results'] + ) + + +def create_parser(): + """ + Use this method if you're coding for an actual user scenario. It actually requires a user to provide the arguments. + """ + # noinspection SpellCheckingInspection + parser = argparse.ArgumentParser( + prog="Daylio to Obsidian Parser", + description="Converts Daylio .CSV backup file into Markdown files for Obsidian.", + epilog="Created by DeutscheGabanna" + ) + parser.add_argument( + "filepath", + type=str, + help="Specify path to the .CSV file" + ) + parser.add_argument( + "destination", + type=str, + help="Path to output finished files into." + ) + parser.add_argument( + "--prefix", # YYYY-MM-DD.md so remember about a separating char + default='', + help="Prepends a given string to each entry's header." + ) + parser.add_argument( + "--suffix", # YYYY-MM-DD.md so remember about a separating char + default='', + help="Appends a given string at the end of each entry's header." + ) + parser.add_argument( + "--tag_activities", "-a", + action="store_true", + help="Tries to convert activities into valid tags.", + dest="ACTIVITIES_AS_TAGS" + ) + parser.add_argument( + "-colour", "--color", + action="store_true", + help="Prepends a colour emoji to each entry depending on mood.", + dest="colour" + ) + parser.add_argument( + '--version', + action='version', + version='%(prog)s 2.0' + ) + parser.add_argument( + "--header", + type=int, + default=2, + help="Headings level for each entry.", + dest="HEADER_LEVEL" + ) + parser.add_argument( + "--tags", + help="Tags in the YAML of each note.", + default="daily", + dest="TAGS" + ) + # TODO: Force-argument does nothing yet. + parser.add_argument( + "--force", + choices=["accept", "refuse"], + help="Skips user confirmation when overwriting files. Either force 'accept' or 'refuse' all requests." + ) + parser.add_argument( + "--csv-delimiter", + default="|", + help="Set delimiter for activities in Daylio .CSV, e.g: football | chess" + ) + # TODO: User should be able to set verbosity level in logging + + return parser diff --git a/dated_entries_group.py b/dated_entries_group.py new file mode 100644 index 0000000..fb2d1c1 --- /dev/null +++ b/dated_entries_group.py @@ -0,0 +1,89 @@ +import re +import logging +import errors +from dated_entry import DatedEntry +import utils + + +class ErrorMsg(errors.ErrorMsgBase): + CSV_ROW_INCOMPLETE_DATA = "Passed .csv contains rows with invalid data. Tried to parse {} as date." + + +class Date: + """ + Day, month and year of a particular date. Validates the date string on instantiation. + str(instance) returns the valid date in the YYYY-MM-DD format. + """ + def __init__(self, string): + self.__logger = logging.getLogger(self.__class__.__name__) + + # does it have a valid format YYYY-MM-DD + valid_date_pattern = re.compile(r'^\d{4}-\d{1,2}-\d{1,2}$') + msg_on_error = ErrorMsg.print(ErrorMsg.WRONG_VALUE, string, "YYYY-MM-DD") + if not re.match(valid_date_pattern, string): + self.__logger.warning(msg_on_error) + raise ValueError + + # does it have valid ranges? (year ranges are debatable) + date_array = string.strip().split('-') + if not all(( + 1900 < int(date_array[0]) < 2100, + 0 < int(date_array[1]) < 13, + 0 < int(date_array[2][:2]) < 32)): + self.__logger.warning(msg_on_error) + raise ValueError + + self.__year = date_array[0] + self.__month = date_array[1] + self.__day = date_array[2] + + def __str__(self): + return '-'.join([self.__year, self.__month, self.__day]) + + +class DatedEntriesGroup(utils.Core): + """ + A particular date which groups entries written that day. + Raises ValueError if instantiated with a wrong date format. + Otherwise, it is truthy if it has any DatedEntry children, or falsy if it does not. + """ + + def __init__(self, date): + self.__logger = logging.getLogger(self.__class__.__name__) + super().__init__() + self.set_uid(Date(date)) + + self.__hash = hash(self.get_uid()) + self.__known_dated_entries = {} + + def __hash__(self): + return self.__hash + + def __bool__(self): + return all(( + super().__bool__(), + len(self.__known_dated_entries) > 0 + )) + + def access_dated_entry(self, time): + """ + Retrieve an already existing DatedEntry object or create if missing. + Object is accessed or created only if the format is valid. + If the time format is invalid, it returns ValueError on DatedEntry instantiation. + """ + # do you already have an entry from that time? + if time in self.get_known_dated_entries(): + self.__logger.debug(ErrorMsg.print(ErrorMsg.OBJECT_FOUND, time)) + return self.__known_dated_entries[time] + else: + dated_entry_obj = DatedEntry(time, parent=self) + self.__logger.debug(ErrorMsg.print(ErrorMsg.OBJECT_NOT_FOUND, time)) + self.__known_dated_entries[time] = dated_entry_obj + return dated_entry_obj + + def get_known_dated_entries(self): + """ + Retrieve the list of known entries. Returns a dictionary of keys. + If it is queried, the method does not validate the correctness of the query. + """ + return self.__known_dated_entries diff --git a/dated_entry.py b/dated_entry.py new file mode 100644 index 0000000..4f42717 --- /dev/null +++ b/dated_entry.py @@ -0,0 +1,146 @@ +"""Creates structured data from Daylio .CSV""" +import logging +import re + +import errors +import utils + + +def is_time_format_valid(string): + return re.compile(r'^([0-1]?[0-9]|2[0-3]):[0-5][0-9]($|\sAM|\sPM)').match(string) + + +def is_time_range_valid(string): + time_array = string.strip().split(':') + + # Check if it's in 12-hour format (AM/PM) or 24-hour format + if 'AM' in string or 'PM' in string: + is_hour_ok = 0 <= int(time_array[0]) <= 12 + else: + is_hour_ok = 0 <= int(time_array[0]) < 24 + + # Minutes can be checked irrespective of AM/PM/_ notation + is_minutes_ok = 0 <= int(time_array[1][:2]) < 60 + + return all((is_hour_ok, is_minutes_ok)) + + +def slice_quotes(string): + """ + Gets rid of initial and terminating quotation marks inserted by Daylio + """ + if len(string) > 2: + return string.strip("\"") + # only 2 characters? Then it is an empty cell. + return "" + + +class ErrorMsg(errors.ErrorMsgBase): + INVALID_MOOD = "Mood {} is missing from a list of known moods. Colouring won't work for this one." + WRONG_PARENT = "Object of class {} is trying to instantiate {} as child. This will end badly." + + +class Time: + """ + Hour and minutes of a particular moment in time. Validates the time string on instantiation. + str(instance) returns the valid time in the HH:MM format. + """ + + def __init__(self, string): + self.__logger = logging.getLogger(self.__class__.__name__) + + if is_time_format_valid(string) and is_time_range_valid(string): + time_array = string.strip().split(':') + self.__hour = time_array[0] + self.__minutes = time_array[1] + else: + msg_on_error = ErrorMsg.print(ErrorMsg.WRONG_VALUE, string, "HH:MM (AM/PM/)") + self.__logger.warning(msg_on_error) + raise ValueError(msg_on_error) + + def __str__(self): + return ':'.join([self.__hour, self.__minutes]) + + +class DatedEntry(utils.Core): + """ + Journal entry made at a given moment in time, and describing a particular emotional state. + It inherits None uid from utils.Core which is then set to self.time. Object is unusable without uid. + """ + + def __init__(self, + time, + parent, + mood, + known_moods): + self.__logger = logging.getLogger(self.__class__.__name__) + super().__init__() + + self.set_uid(Time(time)) + + self.__mood = self.set_mood(mood, known_moods) + self.__activities = [] + self.__title = None + self.__note = None + self.__parent = parent + + def __bool__(self): + # A DatedEntry is truthy only if it contains a healthy parent, time/uid and mood + return all([ + super().__bool__(), + self.get_uid(), + self.get_mood(), + self.get_parent() + ]) + + def set_mood(self, mood, list_of_moods): + valid = False + for i, (_, this_group) in enumerate(list_of_moods.items()): + if mood in this_group: + valid = True + if not valid: + self.__logger.warning(ErrorMsg.print(ErrorMsg.INVALID_MOOD, mood)) + # Assign it anyway. Warning is enough. + self.__mood = mood + return True + + def get_mood(self): + return self.__mood + + def set_activities(self, pipe_delimited_activity_string, delimiter, should_taggify): + array = slice_quotes(pipe_delimited_activity_string).split(delimiter) + if len(array) > 0: + for activity in array: + self.__activities.append(utils.slugify( + activity, + should_taggify + )) + return True + else: + return False + + def get_activities(self): + return self.__activities + + def set_title(self, title): + if len(title) > 0: + self.__title = slice_quotes(title) + return True + else: + return False + + def get_title(self): + return self.__title + + def set_note(self, note): + if len(note) > 0: + self.__note = slice_quotes(note) + return True + else: + return False + + def get_note(self): + return self.__note + + def get_parent(self): + return self.__parent diff --git a/errors.py b/errors.py new file mode 100644 index 0000000..9cfe13e --- /dev/null +++ b/errors.py @@ -0,0 +1,33 @@ +import logging + +# Common logging configuration for the root logger +# noinspection SpellCheckingInspection +msg_format = "(%(asctime)s) %(name)s [%(levelname)s]: %(message)s" +logging.basicConfig(level=logging.DEBUG, format=msg_format) +formatter = logging.Formatter(msg_format) + +# Create a file handler for the root logger +file_log_handler = logging.FileHandler("debug.log") +file_log_handler.setLevel(logging.DEBUG) +file_log_handler.setFormatter(formatter) + +# Create a console handler for the root logger +console_log_handler = logging.StreamHandler() +console_log_handler.setLevel(logging.WARNING) +console_log_handler.setFormatter(formatter) + +# Add the handlers to the root logger +logging.getLogger().addHandler(file_log_handler) +logging.getLogger().addHandler(console_log_handler) + + +class ErrorMsgBase: + # some common errors have been raised in scope into base class instead of child classes + OBJECT_FOUND = "{}-class object found." + OBJECT_NOT_FOUND = "{} object not found. Creating and returning to caller." + FAULTY_OBJECT = "Called a {}-class object method but the object has been incorrectly instantiated." + WRONG_VALUE = "Received {}, expected {}." + + @staticmethod + def print(message, *args): + return message.format(*args) diff --git a/librarian.py b/librarian.py new file mode 100644 index 0000000..127acf5 --- /dev/null +++ b/librarian.py @@ -0,0 +1,173 @@ +"""Provides a 2D array of mood groups and associated moods based on the moods.json file""" +import csv +import json +import logging +import sys + +import config +import errors +import utils +from dated_entries_group import DatedEntriesGroup + + +class ErrorMsg(errors.ErrorMsgBase): + FILE_INCOMPLETE = "{} is incomplete." + FILE_EMPTY = "{} is empty." + FILE_MISSING = "{} does not exist." + PERMISSION_ERROR = "Cannot access {}." + STANDARD_MOODS_USED = "Standard mood set (rad, good, neutral, bad, awful) will be used." + DECODE_ERROR = "Error while decoding {}" + NOT_A_FILE = "{} is not a file." + CSV_ALL_FIELDS_PRESENT = "All expected columns are present in the CSV file columns." + CSV_FIELDS_MISSING = "The following expected columns are missing: " + COUNT_ROWS = "Found {} rows of data in {}." + + +standard_mood_set = { + "rad": ["rad"], + "good": ["good"], + "neutral": ["neutral"], + "bad": ["bad"], + "awful": ["awful"] +} + + +class Librarian: + def __init__(self, + path_to_file, + path_to_moods=None, + custom_config=config.get_defaults()): + self.__known_moods = standard_mood_set + self.__known_dates = {} + self.__logger = logging.getLogger(self.__class__.__name__) + + self.__pass_file(path_to_file, custom_config) + self.__set_custom_moods(path_to_moods) + + def has_custom_moods(self): + return self.__known_moods != standard_mood_set + + def __set_custom_moods(self, json_file): + """ + Overwrite the standard mood-set with a custom one. + Mood-sets are used in output formatting to colour-code the dated entries. + Mood-set is a dict with five keys: rad, good, neutral, bad and awful. + Each key holds an array of any number of strings indicating various moods. + """ + exp_path = utils.expand_path(json_file) + try: + with open(exp_path, encoding="UTF-8") as file: + tmp_mood_set = json.load(file) + except FileNotFoundError: + self.__logger.warning(ErrorMsg.print(ErrorMsg.FILE_MISSING, exp_path)) + return False + except PermissionError: + self.__logger.warning(ErrorMsg.print(ErrorMsg.PERMISSION_ERROR, exp_path)) + return False + except json.JSONDecodeError: + self.__logger.warning(ErrorMsg.print(ErrorMsg.DECODE_ERROR, exp_path)) + return False + + # Try accessing each mood key to raise KeyError if missing + for mood_key in self.__known_moods.keys(): + try: + tmp_mood_set[mood_key] + except KeyError: + self.__logger.warning(ErrorMsg.print(ErrorMsg.FILE_INCOMPLETE, exp_path)) + return False + else: + continue + + # At this point, we know each mood key is present so the dictionary is valid + self.__known_moods = tmp_mood_set + return True + + def __pass_file(self, filepath, custom_config): + """ + Open and parse filepath. Then structure it into Date & DatedEntry objects. + """ + if not self.has_custom_moods(): + self.__logger.info(ErrorMsg.print(ErrorMsg.STANDARD_MOODS_USED)) + + try: + file = open(filepath, newline='', encoding='UTF-8') + except FileNotFoundError: + self.__logger.critical(ErrorMsg.print(ErrorMsg.FILE_MISSING, filepath)) + sys.exit(1) # no point in continuing + except PermissionError: + self.__logger.critical(ErrorMsg.print(ErrorMsg.PERMISSION_ERROR, filepath)) + sys.exit(1) # no point in continuing + except OSError: + self.__logger.critical(OSError) + sys.exit(1) # no point in continuing + + expected_fields = [ + "full_date", + "date", + "weekday", + "time", + "mood", + "activities", + "note", + "note_title", + "note" + ] + + with file: + # Is it a valid CSV? + try: + # strict parameter throws csv.Error if parsing fails + raw_lines = csv.DictReader(file, delimiter=',', quotechar='"', strict=True) + except csv.Error: + self.__logger.critical(ErrorMsg.print(ErrorMsg.DECODE_ERROR, filepath)) + sys.exit(1) + + # Does it have all the fields? + missing_strings = [expected_field for expected_field in expected_fields if + expected_field not in raw_lines.fieldnames] + if not missing_strings: + self.__logger.debug(ErrorMsg.print(ErrorMsg.CSV_ALL_FIELDS_PRESENT)) + else: + self.__logger.critical( + ErrorMsg.print( + ErrorMsg.CSV_FIELDS_MISSING, + {', '.join(missing_strings)} + ) + ) + sys.exit(1) + + # Does it have any rows besides the header? + try: + next(raw_lines) + except StopIteration: + self.__logger.critical(ErrorMsg.print(ErrorMsg.FILE_EMPTY, filepath)) + sys.exit(1) + + # Do any of the rows lack required fields? + lines_parsed = 0 + for line in raw_lines: + line: dict[str] # fix parser incorrectly assuming type + if len(line) < 7: + self.__logger.warning(ErrorMsg.print(ErrorMsg.FILE_INCOMPLETE, line)) + else: + entry = self.access_date(line["full_date"]).access_dated_entry(line["time"]) + entry.set_mood(line["mood"], self.__known_moods) + entry.set_activities(line["activities"], custom_config.csv_delimiter, custom_config.tag_activities) + entry.set_title(line["note_title"]) + entry.set_note(line["note"]) + lines_parsed += 1 + self.__logger.info(ErrorMsg.print(ErrorMsg.COUNT_ROWS, lines_parsed, filepath)) + + return self + + def access_date(self, target_date): + date_obj = DatedEntriesGroup(target_date) + + # have you already filed this date? + if target_date in self.__known_dates: + self.__logger.debug(ErrorMsg.print(ErrorMsg.OBJECT_FOUND, target_date)) + else: + self.__logger.debug(ErrorMsg.print(ErrorMsg.OBJECT_NOT_FOUND, target_date)) + self.__known_dates[date_obj.get_uid()] = date_obj + + return date_obj diff --git a/load_moods.py b/load_moods.py deleted file mode 100755 index 40bcfdd..0000000 --- a/load_moods.py +++ /dev/null @@ -1,8 +0,0 @@ -"""Provides a 2D array of mood groups and associated moods based on the moods.json file""" -import json - -# MOODS are used to determine colour coding for the particular moods if colour = TRUE -# [0,x] - best, [4,x] - worst - -with open("moods.json", encoding="UTF-8") as jsonfile: - available_moods = json.load(jsonfile) diff --git a/main.py b/main.py index 9139ad0..1e99343 100755 --- a/main.py +++ b/main.py @@ -1,52 +1,34 @@ """Parse a Daylio CSV into an Obsidian-compatible .MD file""" import logging import os -# Other -import parse_csv -import compile_md -import write_md + +import config import utils -import cmd_args +from librarian import Librarian logger = logging.getLogger(__name__) -logger.setLevel(logging.INFO) - -# Create a custom handler for console output -console_handler = logging.StreamHandler() -console_handler.setLevel(logging.INFO) - -# Create a formatter for the log messages -formatter = logging.Formatter('%(levelname)s: %(message)s') -console_handler.setFormatter(formatter) - -# Add the handler to the logger -logger.addHandler(console_handler) +main_config = config.create_parser().parse_args() # SETTING THE EXPORT DIRECTORY -parsed_path = utils.expand_path(cmd_args.settings.destination) +parsed_path = utils.expand_path(main_config.destination) logger.info("Checking if destination %s exists...", parsed_path) if not os.path.isdir(parsed_path): - os.makedirs(cmd_args.settings.destination) + os.makedirs(main_config.destination) logger.info("Destination missing... Created") else: logger.info("Destination exists...") -# Parse rows into dictionary of days, where each day can have multiple Entry objects -days = parse_csv.setup( - utils.expand_path(cmd_args.settings.filepath) -) -logger.info( - "Parsed %d unique days with %d entries in total", - len(days), - sum(len(array) for array in days.values()) -) +Librarian( + path_to_file="_tests/sheet-1-valid-data.csv", + path_to_moods="moods.json", + custom_config=main_config) -for day, entries in days.items(): - note_contents = compile_md.compile_note_yaml() - for current_entry in entries: - note_contents += compile_md.compile_entry_contents(current_entry) - current_note = write_md.Note(day, note_contents) - try: - current_note.create_file() - except PermissionError: - logger.debug("User refused to overwrite the file.") +# for day, entries in days.items(): +# note_contents = compile_md.compile_note_yaml() +# for current_entry in entries: +# note_contents += compile_md.compile_entry_contents(current_entry) +# current_note = write_md.Note(day, note_contents) +# try: +# current_note.create_file() +# except PermissionError: +# logger.debug("User refused to overwrite the file.") diff --git a/parse_csv.py b/parse_csv.py deleted file mode 100755 index 821bcc7..0000000 --- a/parse_csv.py +++ /dev/null @@ -1,52 +0,0 @@ -"""Creates structured data from Daylio .CSV""" -import csv -import utils -from cmd_args import settings - -delimiter = f" {settings.csv_delimiter} " - -class Entry: - """Journal entry made at a given moment in time, and describing a particular emotional state""" - def __init__(self, parsed_line, prop_inside_delimiter = delimiter): - # expected CSV row structure: full_date,date,weekday,time,mood,activities,note_title,note - if len(parsed_line)<7: - raise IndexError(f"{parsed_line} contains too few elements") - self.time = parsed_line[3] - self.mood = parsed_line[4] - self.activities = self.slice_quotes(parsed_line[5]).split(prop_inside_delimiter) - for index, _ in enumerate(self.activities): - self.activities[index] = utils.slugify( - self.activities[index], - settings.ACTIVITIES_AS_TAGS - ) - self.title = self.slice_quotes(parsed_line[6]) - self.note = self.slice_quotes(parsed_line[7]) - - @staticmethod # does not process any mood_to_check within itself, but relates to the object - def slice_quotes(string): - """Gets rid of initial and terminating quotation marks inserted by Daylio""" - if len(string) > 2: - return string.strip("\"") - # only 2 characters? Then it is an empty cell. - return "" - -def setup(path): - """Open .CSV file in the path and populate array of Entries""" - with open(path, newline='', encoding='UTF-8') as daylio_raw: - daylio_raw_line = csv.reader(daylio_raw, delimiter=',', quotechar='"') - days = {} - next(daylio_raw_line) # only headers in first line, skip - for row in daylio_raw_line: - # create an Entry object instance and pass CSV values from this row into it - current_entry = Entry(row) - - # Finding keys for specific days and appending corresponding entries - # None means that this day has not been added to days yet - if days.get(row[0]) is None: - entry_list = [] - entry_list.append(current_entry) - days[row[0]] = entry_list - else: - its_a_string_trust_me = row[0] - days[its_a_string_trust_me].append(current_entry) - return days diff --git a/utils.py b/utils.py index e7d9535..9438224 100755 --- a/utils.py +++ b/utils.py @@ -2,28 +2,56 @@ import re import os import logging +import errors + + +class ErrorMsg(errors.ErrorMsgBase): + INVALID_OBSIDIAN_TAGS = "You want your activities as tags, but {} is invalid." + + +class Core: + def __init__(self): + self.__uid = None + + def __bool__(self): + return self.__uid is not None + + def __str__(self): + return str(self.__uid) + + def set_uid(self, value): + self.__uid = value + return True + + def get_uid(self): + return str(self.__uid) + def slugify(text, taggify): - """Simple slugification function""" + # noinspection SpellCheckingInspection + """ + Simple slugification function to transform text. Works on non-latin characters too. + """ + logger = logging.getLogger(__name__) text = str(text).lower() - text = re.sub(re.compile(r"\s+"), '-', text) # Replace spaces with - - text = re.sub(re.compile(r"[^\w\-]+"), '', text) # Remove all non-word chars - text = re.sub(re.compile(r"\-\-+"), '-', text) # Replace multiple - with single - - text = re.sub(re.compile(r"^-+"), '', text) # Trim - from start of text - text = re.sub(re.compile(r"-+$"), '', text) # Trim - from end of text + text = re.sub(re.compile(r"\s+"), '-', text) # Replace spaces with - + text = re.sub(re.compile(r"[^\w\-]+"), '', text) # Remove all non-word chars + text = re.sub(re.compile(r"--+"), '-', text) # Replace multiple - with single - + text = re.sub(re.compile(r"^-+"), '', text) # Trim - from start of text + text = re.sub(re.compile(r"-+$"), '', text) # Trim - from end of text if taggify: if re.match('[0-9]', text): - logging.warning("You want your activities as tags, but %s is invalid.", text) + logger.warning(ErrorMsg.print(ErrorMsg.INVALID_OBSIDIAN_TAGS, text)) return text + def expand_path(path): - """Expand all %variables%, ~/home-directories and relative parts in the path - Return the expanded, absolute path""" - # Expands the tilde (~) character to the user's home directory + """ + Expand all %variables%, ~/home-directories and relative parts in the path. Return the expanded path. + It does not use os.path.abspath() because it treats current script directory as root. + """ + # Converts the filepath to an absolute path and then expands the tilde (~) character to the user's home directory return os.path.expanduser( # Expands environment variables in the path, such as %appdata% - os.path.expandvars( - # Converts the filepath to an absolute path - os.path.abspath(path) - ) + os.path.expandvars(path) ) diff --git a/write_md.py b/write_md.py index 2a3f930..a335713 100755 --- a/write_md.py +++ b/write_md.py @@ -2,7 +2,7 @@ import hashlib import os # Custom -from cmd_args import settings +from config import settings class Note: """Note is a file encompassing every entry made on a given date""" From 514881798ae8c0401ada8a8e651f351f8e2127b9 Mon Sep 17 00:00:00 2001 From: DeutscheGabanna Date: Wed, 22 Nov 2023 20:38:47 +0100 Subject: [PATCH 04/40] better __docs__ for files and classes --- compile_md.py | 60 ------------------------------------------ dated_entries_group.py | 11 ++++++++ dated_entry.py | 8 +++++- errors.py | 9 +++++++ librarian.py | 13 +++++++-- utils.py | 4 ++- write_md.py | 51 ----------------------------------- 7 files changed, 41 insertions(+), 115 deletions(-) delete mode 100755 compile_md.py delete mode 100755 write_md.py diff --git a/compile_md.py b/compile_md.py deleted file mode 100755 index cf53047..0000000 --- a/compile_md.py +++ /dev/null @@ -1,60 +0,0 @@ -"""COMPILE STRUCTURED DATA INTO STRING TO OUTPUT ---------------------------------------------- -According to this schema: ---- -tags: ---- - -### / -#activity_1 #activity_2 #activity_3 -<your_entry> - -[repeat] -""" -import logging -from functools import reduce -# Other -from config import settings -import load_moods -import utils - -def compile_note_yaml(): - """Returns string with YAML metadata for each note. It looks like this: - --- - tags: <your_custom_tags> - --- - """ - return f"---\ntags: {settings.TAGS} \n---\n\n" - -def compile_entry_contents(entry): - """Return a string that is a parsed entry from Daylio CSV as a string. - It looks like this: - - ### <hour> / <title> - #activity_1 #activity_2 #activity_3 - <your_entry> - - """ - # compose the title with optional mood colouring - this_entry_title = get_colour(entry.mood) + entry.mood + " - " + entry.time - final_output = settings.HEADER_LEVEL*'#' + " " + this_entry_title - - # compose the mood-tag and the activity-tags into one paragraph - final_output += "\nI felt #" - final_output += utils.slugify(entry.mood, settings.ACTIVITIES_AS_TAGS) - if len(entry.activities) > 0 and entry.activities[0] != "": - final_output += " with the following: " - ## first append # to each activity, then mush them together into one string - final_output += reduce( - lambda el1,el2 : el1+" "+el2, map(lambda x:"#"+x,entry.activities) - ) - else: final_output += "." - - ## then add the rest of the text - if entry.note != "": - final_output += "\n" + entry.note + "\n\n" - else: - final_output += "\n\n" - return final_output - - diff --git a/dated_entries_group.py b/dated_entries_group.py index fb2d1c1..943d216 100644 --- a/dated_entries_group.py +++ b/dated_entries_group.py @@ -1,3 +1,10 @@ +""" +This file specialises in building the middleware between actual journal entries and the whole journal. +It creates and organises only those entries written on a particular date. This way they can be handled easier. + +Here's a quick breakdown of what is the specialisation of this file in the journaling process: +all notes -> _NOTES WRITTEN ON A PARTICULAR DATE_ -> a particular note +""" import re import logging import errors @@ -46,6 +53,10 @@ class DatedEntriesGroup(utils.Core): A particular date which groups entries written that day. Raises ValueError if instantiated with a wrong date format. Otherwise, it is truthy if it has any DatedEntry children, or falsy if it does not. + + Imagine it as a scribe, holding a stack of papers in his hand. + The master Librarian knows each one of the scribes, including this one. + However, the scribe knows only his papers. The papers contain all entries written that particular date. """ def __init__(self, date): diff --git a/dated_entry.py b/dated_entry.py index 4f42717..151996c 100644 --- a/dated_entry.py +++ b/dated_entry.py @@ -1,4 +1,10 @@ -"""Creates structured data from Daylio .CSV""" +""" +dated_entry focuses on building the individual entries, made at a particular moment, as objects. +It is the most atomic level of the journaling process. + +Here's a quick breakdown of what is the specialisation of this file in the journaling process: +all notes -> notes written on a particular date -> _A PARTICULAR NOTE_ +""" import logging import re diff --git a/errors.py b/errors.py index 9cfe13e..32dea7c 100644 --- a/errors.py +++ b/errors.py @@ -22,6 +22,11 @@ class ErrorMsgBase: + """ + Used for common errors that will be logged by almost (if not all) loggers. + Therefore, it is the base class of other Error child classes in specific files. + It also provides a shorthand method for inserting variables into error messages - print(). + """ # some common errors have been raised in scope into base class instead of child classes OBJECT_FOUND = "{}-class object found." OBJECT_NOT_FOUND = "{} object not found. Creating and returning to caller." @@ -30,4 +35,8 @@ class ErrorMsgBase: @staticmethod def print(message, *args): + """ + Insert the args into an error message. If the error message expects n variables, provide n arguments. + Returns a string with the already filled out message. + """ return message.format(*args) diff --git a/librarian.py b/librarian.py index 127acf5..b92bbd8 100644 --- a/librarian.py +++ b/librarian.py @@ -1,4 +1,13 @@ -"""Provides a 2D array of mood groups and associated moods based on the moods.json file""" +""" +Librarian is a builder/singleton type class. It creates and initialises other builder objects - e.g. DatedEntriesGroup. +It sets up the process, parses the CSV file and passes extracted values to DatedEntriesGroup. +Imagine Librarian is an actual person, reading the contents of the file out-loud to a scribe (DatedEntriesGroup). +Each date is a different scribe, but they all listen to the same Librarian. +Librarian knows their identity and can call upon them when needed to recite their contents back to the Librarian. + +Here's a quick breakdown of what is the specialisation of this file in the journaling process: +_ALL NOTES_ -> notes written on a particular date -> a particular note +""" import csv import json import logging @@ -146,7 +155,7 @@ def __pass_file(self, filepath, custom_config): # Do any of the rows lack required fields? lines_parsed = 0 for line in raw_lines: - line: dict[str] # fix parser incorrectly assuming type + line: dict[str] # fix parser incorrectly assuming type if len(line) < 7: self.__logger.warning(ErrorMsg.print(ErrorMsg.FILE_INCOMPLETE, line)) else: diff --git a/utils.py b/utils.py index 9438224..07e4859 100755 --- a/utils.py +++ b/utils.py @@ -1,4 +1,6 @@ -"""Contains universally useful functions""" +""" +Contains universally useful functions +""" import re import os import logging diff --git a/write_md.py b/write_md.py deleted file mode 100755 index a335713..0000000 --- a/write_md.py +++ /dev/null @@ -1,51 +0,0 @@ -"""Creates an .MD file based on the string provided by compile_md.py""" -import hashlib -import os -# Custom -from config import settings - -class Note: - """Note is a file encompassing every entry made on a given date""" - def __init__(self, day, contents): - self.day = day - self.contents = contents - self.path = f"{os.path.join(settings.destination, settings.prefix)}{day}{settings.suffix}.md" - self.control_sum = hashlib.sha256() - temp_contents = self.contents # we need a temporary contents var to be consumed - for byte_block in iter(lambda: temp_contents[:4096].encode('utf-8'), b""): - self.control_sum.update(byte_block) - temp_contents = temp_contents[4096:] - - def create_file(self): - """Tries to create or overwrite file from self.path with self.contents - Raises PermissionError if file exists and user does not want it overwritten.""" - if os.path.exists(self.path) and self.compare_existing() is True: - # User can have note files from previous runs of the script - choice = None # [None, yes, no] - # TODO: support for forcing overwrites if optional argument is set to True - while choice is None: - answer = input(f"{self.path} already exists and differs. Overwrite? (y/n) ") - if str(answer).lower() in ["yes", "y"]: - choice = True - self.output_contents() - elif str(answer).lower() in ["no", "n"]: - choice = False - if choice is False: - raise PermissionError("User denied overwrite") - else: - self.output_contents() - - def output_contents(self): - """Opens self.path and outputs self.contents. - Recurring usage in self.create_file()""" - with open(self.path, 'w', encoding='UTF-8') as file: - file.write(self.contents) - - def compare_existing(self): - """Calculates checksum for file existing at self.path. - Returns True if matches self.contents.""" - control_sum_existing_file = hashlib.sha256() - with open(self.path,"rb") as f: - for byte_block in iter(lambda: f.read(4096),b""): - control_sum_existing_file.update(byte_block) - return control_sum_existing_file.hexdigest() == self.control_sum.hexdigest() From cf032e5f95999674529ad9b1e4c9d5b98923dc70 Mon Sep 17 00:00:00 2001 From: DeutscheGabanna <thetobruk@duck.com> Date: Wed, 3 Jan 2024 23:15:50 +0100 Subject: [PATCH 05/40] i'm exhausted and i regret not having more commits. this version still isn't in a usable state, but at least i'd be able to access it online. --- .github/workflows/test.yaml | 6 +- .idea/Obsidian-Daylio-Parser.iml | 6 +- .idea/misc.xml | 2 +- .vscode/launch.json | 20 --- _tests/test_config.py | 57 +++++++++ _tests/test_errors.py | 16 +++ _tests/test_librarian.py | 44 +++---- config.py | 206 ++++++++++++++++--------------- dated_entries_group.py | 118 +++++++++++++----- dated_entry.py | 177 ++++++++++++++++++-------- errors.py | 45 +++++-- librarian.py | 190 +++++++++++++++++++++------- main.py | 35 ++---- utils.py | 20 +-- 14 files changed, 630 insertions(+), 312 deletions(-) delete mode 100755 .vscode/launch.json create mode 100644 _tests/test_config.py create mode 100644 _tests/test_errors.py diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index b712fc8..eda3f64 100755 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -13,6 +13,8 @@ jobs: uses: actions/setup-python@v1 with: python-version: 3.8 - - name: Unittest main.py + - name: Discover tests run: | - python _tests/test_main.py + echo "Starting discovery..." + python -m unittest discover -s "./_tests" + diff --git a/.idea/Obsidian-Daylio-Parser.iml b/.idea/Obsidian-Daylio-Parser.iml index 9ee7bc1..5b0c423 100644 --- a/.idea/Obsidian-Daylio-Parser.iml +++ b/.idea/Obsidian-Daylio-Parser.iml @@ -6,14 +6,10 @@ <excludeFolder url="file://$MODULE_DIR$/_tests/locked-dir" /> <excludeFolder url="file://$MODULE_DIR$/_tests/output-results" /> </content> - <orderEntry type="inheritedJdk" /> + <orderEntry type="jdk" jdkName="$USER_HOME$/miniconda3" jdkType="Python SDK" /> <orderEntry type="sourceFolder" forTests="false" /> </component> <component name="PackageRequirementsSettings"> <option name="requirementsPath" value="" /> </component> - <component name="PyDocumentationSettings"> - <option name="format" value="PLAIN" /> - <option name="myDocStringFormat" value="Plain" /> - </component> </module> \ No newline at end of file diff --git a/.idea/misc.xml b/.idea/misc.xml index a971a2c..4e9c2aa 100644 --- a/.idea/misc.xml +++ b/.idea/misc.xml @@ -1,4 +1,4 @@ <?xml version="1.0" encoding="UTF-8"?> <project version="4"> - <component name="ProjectRootManager" version="2" project-jdk-name="Python 3.11" project-jdk-type="Python SDK" /> + <component name="ProjectRootManager" version="2" project-jdk-name="$PROJECT_DIR$/../miniconda3" project-jdk-type="Python SDK" /> </project> \ No newline at end of file diff --git a/.vscode/launch.json b/.vscode/launch.json deleted file mode 100755 index 87d7d0d..0000000 --- a/.vscode/launch.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - // Use IntelliSense to learn about possible attributes. - // Hover to view descriptions of existing attributes. - // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 - "version": "0.2.0", - "configurations": [ - { - "name": "Python: Current File", - "type": "python", - "request": "launch", - "program": "${file}", - "console": "integratedTerminal", - "justMyCode": true - // "args": [ - // "./_tests/sheet-1-valid-data.csv", - // "~/Daylio export/" - // ] - } - ] -} \ No newline at end of file diff --git a/_tests/test_config.py b/_tests/test_config.py new file mode 100644 index 0000000..d78f8d6 --- /dev/null +++ b/_tests/test_config.py @@ -0,0 +1,57 @@ +from unittest import TestCase +from config import options + + +class TestSettingsManager(TestCase): + def setUp(self) -> None: + self.librarian_settings = options.get_console().add_argument_group( + "Librarian", + "Handles main options" + ) + self.librarian_settings.add_argument( + "filepath", + type=str, + help="Specify path to the .CSV file" + ) + self.librarian_settings.add_argument( + "destination", + default="", + type=str, + help="Path to folder to output finished files into." + ) + self.filepath_to_check = "_tests/sheet-1-valid-data.csv" + self.destination_to_check = "somewhere" + self.force_option_to_check = "accept" + + def test_spoofed_librarian_settings_without_equality_sign(self): + options.parse_spoofed_console([ + self.filepath_to_check, + self.destination_to_check, + "--force", self.force_option_to_check]) + self.assertEqual(options.settings.filepath, self.filepath_to_check) + self.assertEqual(options.settings.destination, self.destination_to_check) + self.assertEqual(options.settings.force, self.force_option_to_check) + + def test_spoofed_librarian_settings_with_equality_sign(self): + options.parse_spoofed_console([ + self.filepath_to_check, + self.destination_to_check, + "--force=" + self.force_option_to_check]) + self.assertEqual(options.settings.filepath, self.filepath_to_check) + self.assertEqual(options.settings.destination, self.destination_to_check) + self.assertEqual(options.settings.force, self.force_option_to_check) + + def test_expected_failure_empty_argument_array(self): + with self.assertRaises(SystemExit) as cm: + options.parse_spoofed_console([]) + self.assertEqual(cm.exception.code, 2, msg="Invalid arguments were passed to argparse so it should exit with 2") + + def test_expected_failure_outside_of_dictionary(self): + with self.assertRaises(SystemExit) as cm: + # noinspection SpellCheckingInspection + options.parse_spoofed_console( + [self.filepath_to_check, + self.destination_to_check, + "--force", + "yabadoo"]) + self.assertEqual(cm.exception.code, 2, msg="Invalid arguments were passed to argparse so it should exit with 2") diff --git a/_tests/test_errors.py b/_tests/test_errors.py new file mode 100644 index 0000000..0f9b0ab --- /dev/null +++ b/_tests/test_errors.py @@ -0,0 +1,16 @@ +from unittest import TestCase +import errors + + +class TestErrorMsgBase(TestCase): + def test_print(self): + # All arguments correctly passed + self.assertIsInstance( + errors.ErrorMsgBase.print(errors.ErrorMsgBase.WRONG_VALUE, "x", "y"), + str, + msg="The function should return a string.") + # Missing argument + self.assertEqual( + errors.ErrorMsgBase.print(errors.ErrorMsgBase.WRONG_VALUE, "y"), + None, + msg="The function should complain it has not received enough arguments to complete the error message") diff --git a/_tests/test_librarian.py b/_tests/test_librarian.py index 2bea20d..61995f7 100644 --- a/_tests/test_librarian.py +++ b/_tests/test_librarian.py @@ -10,13 +10,10 @@ class TestLibrarian(TestCase): The Librarian is responsible for parsing files and outputting the final journal. We use internal class methods to check proper handling of data throughout the process. """ - def setUp(self): - self.lib = Librarian( - path_to_file="sheet-1-valid-data.csv", - path_to_moods="moods.json" - ) - def test_set_custom_moods(self): + """ + Pass faulty moods and see if Librarian notices it does not know any custom moods while parsing. + """ # assertTrue is not needed, because it would have already failed at setUp() self.assertFalse(Librarian("sheet-2-corrupted-bytes.csv").has_custom_moods()) self.assertFalse(Librarian("sheet-3-wrong-format.txt").has_custom_moods()) @@ -46,29 +43,34 @@ def test_access_date(self): - return False if lib does not contain Date obj, and return empty obj - throw ValueError if the string does not follow day format """ + lib = Librarian( + path_to_file="sheet-1-valid-data.csv", + path_to_moods="../moods.json" + ) # obj is truthy if it has uid and at least one child DatedEntry (debatable) - self.assertTrue(self.lib.access_date("2022-10-25")) - self.assertTrue(self.lib.access_date("2022-10-26")) - self.assertTrue(self.lib.access_date("2022-10-27")) - self.assertTrue(self.lib.access_date("2022-10-30")) + self.assertTrue(lib.access_date("2022-10-25")) + self.assertTrue(lib.access_date("2022-10-26")) + self.assertTrue(lib.access_date("2022-10-27")) + self.assertTrue(lib.access_date("2022-10-30")) # obj is falsy if the object has no child DatedEntry (debatable) - self.assertRaises(FileNotFoundError, self.lib.access_date, "2022-10-21") - self.assertRaises(FileNotFoundError, self.lib.access_date, "2022-10-20") - self.assertRaises(FileNotFoundError, self.lib.access_date, "2017-10-20") - self.assertRaises(FileNotFoundError, self.lib.access_date, "1819-10-20") + self.assertRaises(FileNotFoundError, lib.access_date, "2022-10-21") + self.assertRaises(FileNotFoundError, lib.access_date, "2022-10-20") + self.assertRaises(FileNotFoundError, lib.access_date, "2017-10-20") + self.assertRaises(FileNotFoundError, lib.access_date, "1819-10-20") - self.assertRaises(ValueError, self.lib.access_date, "ABC") - self.assertRaises(ValueError, self.lib.access_date, "2022") - self.assertRaises(ValueError, self.lib.access_date, "1999-1-1") - self.assertRaises(ValueError, self.lib.access_date, "12:00 AM") + self.assertRaises(ValueError, lib.access_date, "ABC") + self.assertRaises(ValueError, lib.access_date, "2022") + self.assertRaises(ValueError, lib.access_date, "1999-1-1") + self.assertRaises(ValueError, lib.access_date, "12:00 AM") def test_has_custom_moods(self): - self.assertTrue(self.lib.has_custom_moods()) + self.assertTrue(Librarian( + path_to_file="sheet-1-valid-data.csv", + path_to_moods="../moods.json" + ).has_custom_moods()) self.assertFalse(Librarian("sheet-1-valid-data.csv")) self.assertRaises(json.JSONDecodeError, Librarian, "sheet-1-valid-data.csv", "empty_sheet.csv") self.assertRaises(FileNotFoundError, Librarian, "sheet-1-valid-data.csv", "missing-file.json") self.assertRaises(PermissionError, Librarian, "sheet-1-valid-data.csv", "locked-dir/locked_file.csv") self.assertRaises(KeyError, Librarian, "sheet-1-valid-data.csv", "incomplete-moods.json") - - diff --git a/config.py b/config.py index e089e73..c89a31a 100755 --- a/config.py +++ b/config.py @@ -1,109 +1,117 @@ """ Sets up all necessary options and arguments. -Librarian: -├── filepath -├── destination -└── force +. +├── Librarian/ +│ ├── processing/ +│ │ └── filepath +│ └── outputting/ +│ ├── destination +│ └── force +| +├── DatedGroup/ +│ ├── processing +│ └── outputting +| +└── DatedEntries/ + ├── processing/ + │ └── csv_delimiter + └── outputting/ + ├── prefix + ├── suffix + ├── colour + └── header +""" +import argparse +import logging +from typing import List -DatedGroup: -└── tags +# Logging for config library +logger = logging.getLogger(__name__) -DatedEntries: -├── prefix (output) -├── suffix (output) -├── activities_as_tags (creation) -├── csv_delimiter -├── colour -└── header -You can either: -* config.get_defaults() to avoid any complains from argparse during testing -* create_parser().parse_args() if you're coding for an actual user scenario -The second option will require the program be started in a console environment with properly passed arguments. -""" -import argparse +class SettingsManager: + def __init__(self): + # noinspection SpellCheckingInspection + self.__console_arguments = argparse.ArgumentParser( + fromfile_prefix_chars="@", + prog="Daylio to Obsidian Parser", + description="Converts Daylio .CSV backup file into Markdown files for Obsidian.", + epilog="Created by DeutscheGabanna" + ) + # regardless of whether you stick with these defaults or parse the console args, you get the same properties + # useful for testing purposes when you do not want to invoke argparse + # --- + # Librarian + # --- + self.force = None + # if you're wondering about these two below - I'd rather omit here positional arguments like these + # it makes more sense for them to be passed as function arguments when initialising Librarian object + # self.filepath = "/_tests/sheet-1-valid-data.csv" + # self.destination = "/_tests/output_results/" + # --- + # Dated Entry + # --- + self.csv_delimiter = '|' + self.header = 2 + self.tags = ["daily"] + self.prefix = '' + self.suffix = '' + self.tag_activities = True + self.colour = True + + def get_console(self): + """ + Retrieves the :class:`argparse.ArgumentParser` object from :class:`SettingsManager` so you can modify it. + :return: :class:`argparse.ArgumentParser` + """ + return self.__console_arguments + + def parse_console(self): + """ + Configures SettingsManager by accessing the console and retrieving the arguments used to run the script. + """ + # namespace=self adds the properties to the SettingsManager obj, instead of creating a new Namespace obj + # Without namespace=self + # --- + # - SettingsManager + # - Namespace obj that holds actual settings + # - foo = foo + # - bar = bar + # With namespace=self + # - SettingsManager + # - foo = foo + # - bar = bar + self.__console_arguments.parse_args(namespace=self) + def parse_spoofed_console(self, spoofed_string_of_args: List[str]): + """ + Configures SettingsManager without accessing the console. Useful for testing purposes. Don't use it elsewhere. + :param spoofed_string_of_args: Set of strs with positional and optional arguments as if written in CMD. + """ + # namespace=self adds the properties to the SettingsManager obj, instead of creating a new Namespace obj + # Without namespace=self + # --- + # - SettingsManager + # - Namespace obj that holds actual settings + # - foo = foo + # - bar = bar + # With namespace=self + # - SettingsManager + # - foo = foo + # - bar = bar + self.__console_arguments.parse_args(args=spoofed_string_of_args, namespace=self) -def get_defaults(): - """ - Use this method to avoid any complains from argparse during testing by spoon-feeding two crucial arguments. - """ - return create_parser().parse_args( - ['sheet-1-valid-data.csv', '/output-results'] - ) +# Global configuration +# --- +options = SettingsManager() -def create_parser(): - """ - Use this method if you're coding for an actual user scenario. It actually requires a user to provide the arguments. - """ - # noinspection SpellCheckingInspection - parser = argparse.ArgumentParser( - prog="Daylio to Obsidian Parser", - description="Converts Daylio .CSV backup file into Markdown files for Obsidian.", - epilog="Created by DeutscheGabanna" - ) - parser.add_argument( - "filepath", - type=str, - help="Specify path to the .CSV file" - ) - parser.add_argument( - "destination", - type=str, - help="Path to output finished files into." - ) - parser.add_argument( - "--prefix", # <here's your prefix>YYYY-MM-DD.md so remember about a separating char - default='', - help="Prepends a given string to each entry's header." - ) - parser.add_argument( - "--suffix", # YYYY-MM-DD<here's your suffix>.md so remember about a separating char - default='', - help="Appends a given string at the end of each entry's header." - ) - parser.add_argument( - "--tag_activities", "-a", - action="store_true", - help="Tries to convert activities into valid tags.", - dest="ACTIVITIES_AS_TAGS" - ) - parser.add_argument( - "-colour", "--color", - action="store_true", - help="Prepends a colour emoji to each entry depending on mood.", - dest="colour" - ) - parser.add_argument( - '--version', - action='version', - version='%(prog)s 2.0' - ) - parser.add_argument( - "--header", - type=int, - default=2, - help="Headings level for each entry.", - dest="HEADER_LEVEL" - ) - parser.add_argument( - "--tags", - help="Tags in the YAML of each note.", - default="daily", - dest="TAGS" - ) - # TODO: Force-argument does nothing yet. - parser.add_argument( - "--force", - choices=["accept", "refuse"], - help="Skips user confirmation when overwriting files. Either force 'accept' or 'refuse' all requests." - ) - parser.add_argument( - "--csv-delimiter", - default="|", - help="Set delimiter for activities in Daylio .CSV, e.g: football | chess" - ) - # TODO: User should be able to set verbosity level in logging +# Add some common options +options.get_console().add_argument( + '--version', + action='version', + version='%(prog)s 3.0' +) - return parser +# TODO: User should be able to set verbosity level in logging +# TODO: Force-argument does nothing yet. diff --git a/dated_entries_group.py b/dated_entries_group.py index 943d216..fea45f9 100644 --- a/dated_entries_group.py +++ b/dated_entries_group.py @@ -7,9 +7,29 @@ """ import re import logging + +import dated_entry import errors +from typing import List from dated_entry import DatedEntry import utils +from config import options + + +class DatedEntryMissingError(utils.CustomException): + pass + + +class IncompleteDataRow(utils.CustomException): + pass + + +class InvalidDateError(utils.CustomException): + pass + + +class TriedCreatingDuplicateDatedEntryError(utils.CustomException): + pass class ErrorMsg(errors.ErrorMsgBase): @@ -19,17 +39,19 @@ class ErrorMsg(errors.ErrorMsgBase): class Date: """ Day, month and year of a particular date. Validates the date string on instantiation. - str(instance) returns the valid date in the YYYY-MM-DD format. """ - def __init__(self, string): + + def __init__(self, string: str): + """ + :raises InvalidDateError: if :param:`string` is not a valid date (for example the month number > 12) + :param string: on which entries have been created (`YYYY-MM-DD`) + """ self.__logger = logging.getLogger(self.__class__.__name__) # does it have a valid format YYYY-MM-DD valid_date_pattern = re.compile(r'^\d{4}-\d{1,2}-\d{1,2}$') - msg_on_error = ErrorMsg.print(ErrorMsg.WRONG_VALUE, string, "YYYY-MM-DD") if not re.match(valid_date_pattern, string): - self.__logger.warning(msg_on_error) - raise ValueError + raise InvalidDateError # does it have valid ranges? (year ranges are debatable) date_array = string.strip().split('-') @@ -37,22 +59,22 @@ def __init__(self, string): 1900 < int(date_array[0]) < 2100, 0 < int(date_array[1]) < 13, 0 < int(date_array[2][:2]) < 32)): - self.__logger.warning(msg_on_error) - raise ValueError + raise InvalidDateError self.__year = date_array[0] self.__month = date_array[1] self.__day = date_array[2] - def __str__(self): + def __str__(self) -> str: + """ + :return: returns the valid date in the YYYY-MM-DD format. This is the superior format, end of discussion. + """ return '-'.join([self.__year, self.__month, self.__day]) class DatedEntriesGroup(utils.Core): """ A particular date which groups entries written that day. - Raises ValueError if instantiated with a wrong date format. - Otherwise, it is truthy if it has any DatedEntry children, or falsy if it does not. Imagine it as a scribe, holding a stack of papers in his hand. The master Librarian knows each one of the scribes, including this one. @@ -62,39 +84,75 @@ class DatedEntriesGroup(utils.Core): def __init__(self, date): self.__logger = logging.getLogger(self.__class__.__name__) super().__init__() - self.set_uid(Date(date)) - self.__hash = hash(self.get_uid()) - self.__known_dated_entries = {} + try: + self.set_uid(Date(date)) + except InvalidDateError: + self.__logger.warning(ErrorMsg.print(ErrorMsg.WRONG_VALUE, date, "YYYY-MM-DD")) + raise ValueError + else: + self.__hash = hash(self.get_uid()) + self.__known_dated_entries = {} def __hash__(self): return self.__hash def __bool__(self): + """ + :return: ``True`` if itself has any :class:`DatedEntry` children + """ return all(( super().__bool__(), len(self.__known_dated_entries) > 0 )) - def access_dated_entry(self, time): + def create_dated_entry_from_row(self, + line: dict[str], + known_moods: dict[List[str]] = None) -> dated_entry.DatedEntry: """ - Retrieve an already existing DatedEntry object or create if missing. - Object is accessed or created only if the format is valid. - If the time format is invalid, it returns ValueError on DatedEntry instantiation. + :func:`access_dated_entry` of :class:`DatedEntry` object with the specified parameters. + :raises TriedCreatingDuplicateDatedEntryError: if it would result in making a duplicate :class:`DatedEntry` + :raises IncompleteDataRow: if ``line`` does not have ``time`` and ``mood`` keys at the very least + :param line: a dictionary of strings. Required keys: mood, activities, note_title & note. + :param known_moods: each key of the dict should have a set of strings containing moods. """ - # do you already have an entry from that time? - if time in self.get_known_dated_entries(): - self.__logger.debug(ErrorMsg.print(ErrorMsg.OBJECT_FOUND, time)) - return self.__known_dated_entries[time] + # TODO: test case this + # Try accessing the minimum required keys + for key in ["time", "mood"]: + try: + line[key] + except KeyError: + raise IncompleteDataRow(key) + + # Check if there's already an object with this time + if line["time"] in self.__known_dated_entries: + raise TriedCreatingDuplicateDatedEntryError else: - dated_entry_obj = DatedEntry(time, parent=self) - self.__logger.debug(ErrorMsg.print(ErrorMsg.OBJECT_NOT_FOUND, time)) - self.__known_dated_entries[time] = dated_entry_obj - return dated_entry_obj - - def get_known_dated_entries(self): + # Instantiate the entry + entry = dated_entry.DatedEntry( + line["time"], + line["mood"], + self, + known_moods, + activities=line["activities"], + title=line["title"], + note=line["note"] + ) + return entry + + def access_dated_entry(self, time: str) -> DatedEntry: """ - Retrieve the list of known entries. Returns a dictionary of keys. - If it is queried, the method does not validate the correctness of the query. + Retrieve an already existing DatedEntry object. + :param time: any string, but if it's not a valid HH:MM format then I guarantee it won't be found either way + :raises DatedEntryMissingError: if the entry specified in ``time`` does not exist + :returns: :class:`DatedEntry` """ - return self.__known_dated_entries + try: + ref = self.__known_dated_entries[time] + except KeyError: + msg = ErrorMsg.print(ErrorMsg.OBJECT_NOT_FOUND, time) + self.__logger.warning(msg) + raise DatedEntryMissingError(msg) + else: + self.__logger.debug(ErrorMsg.print(ErrorMsg.OBJECT_FOUND, time)) + return ref diff --git a/dated_entry.py b/dated_entry.py index 151996c..34fedc0 100644 --- a/dated_entry.py +++ b/dated_entry.py @@ -7,16 +7,77 @@ """ import logging import re +from typing import Match +from typing import List +import dated_entries_group +from config import options import errors import utils - -def is_time_format_valid(string): +# Adding DatedEntry-specific options in global_settings +dated_entry_settings = options.get_console().add_argument_group( + "Dated Entries", + "Handles how entries should be formatted" +) +dated_entry_settings.add_argument( + "--tags", + help="Tags in the YAML of each note.", + nargs='*', # this allows, for example, "--tags one two three" --> ["one", "two", "three"] + default="daily", + dest="TAGS" +) +dated_entry_settings.add_argument( + "--prefix", # <here's your prefix>YYYY-MM-DD.md so remember about a separating char + default='', + help="Prepends a given string to each entry's header." +) +dated_entry_settings.add_argument( + "--suffix", # YYYY-MM-DD<here's your suffix>.md so remember about a separating char + default='', + help="Appends a given string at the end of each entry's header." +) +dated_entry_settings.add_argument( + "--tag_activities", "-a", + action="store_true", # default=True + help="Tries to convert activities into valid tags.", + dest="ACTIVITIES_AS_TAGS" +) +dated_entry_settings.add_argument( + "-colour", "--color", + action="store_true", # default=True + help="Prepends a colour emoji to each entry depending on mood.", + dest="colour" +) +dated_entry_settings.add_argument( + "--header", + type=int, + default=2, + help="Headings level for each entry.", + dest="HEADER_LEVEL" +) +dated_entry_settings.add_argument( + "--csv-delimiter", + default="|", + help="Set delimiter for activities in Daylio .CSV, e.g: football | chess" +) + + +def is_time_format_valid(string: str) -> Match[str] | None: + """ + Is the time format of :param:`str` valid? + :param string: time to check + :return: True if :param:`str` follows the ``HH:MM`` format, with optional AM/PM appended + """ return re.compile(r'^([0-1]?[0-9]|2[0-3]):[0-5][0-9]($|\sAM|\sPM)').match(string) -def is_time_range_valid(string): +def is_time_range_valid(string: str) -> bool: + """ + Is the time range of :param:`str` valid? + :param string: time to check + :return: True if hour and minute ranges are both ok, False otherwise + """ time_array = string.strip().split(':') # Check if it's in 12-hour format (AM/PM) or 24-hour format @@ -31,9 +92,11 @@ def is_time_range_valid(string): return all((is_hour_ok, is_minutes_ok)) -def slice_quotes(string): +def slice_quotes(string: str) -> str: """ Gets rid of initial and terminating quotation marks inserted by Daylio + :param string: string to be sliced + :returns: string without quotation marks in the beginning and end of the initial string, even if it means empty str. """ if len(string) > 2: return string.strip("\"") @@ -49,10 +112,17 @@ class ErrorMsg(errors.ErrorMsgBase): class Time: """ Hour and minutes of a particular moment in time. Validates the time string on instantiation. - str(instance) returns the valid time in the HH:MM format. + str(instance) returns the valid time in the ``HH:MM`` format. + :raises ValueError: if string is not a valid time in ``HH:MM`` format (either AM/PM or 24h) """ def __init__(self, string): + """ + Upon instantiation checks if the time is valid. + Used in :class:`DatedEntry` to create an instance of this class. + :raises ValueError: if string is not a valid time in ``HH:MM`` format with optional AM/PM appended + :param string: time in ``HH:MM`` format - can have AM/PM appended + """ self.__logger = logging.getLogger(self.__class__.__name__) if is_time_format_valid(string) and is_time_range_valid(string): @@ -75,20 +145,66 @@ class DatedEntry(utils.Core): """ def __init__(self, - time, - parent, - mood, - known_moods): + time: str, + mood: str, + parent: dated_entries_group.DatedEntriesGroup, + known_moods: dict[List[str]], + activities: str = None, + title: str = None, + note: str = None): + # TODO: have to test the whole instantiation function again after refactoring self.__logger = logging.getLogger(self.__class__.__name__) super().__init__() - self.set_uid(Time(time)) + # Processing required properties + # --- + # Time + try: + self.set_uid(Time(time)) + except ValueError: + raise ValueError + + # Mood + if len(mood) == 0: + raise ValueError + else: + is_mood_valid = False + for i, (_, this_group) in enumerate(known_moods.items()): + if mood in this_group: + is_mood_valid = True + break + if not is_mood_valid: + self.__logger.warning(ErrorMsg.print(ErrorMsg.INVALID_MOOD, mood)) + # Assign it anyway. Warning is enough. + self.__mood = mood + + # Parent + if not isinstance(parent, dated_entries_group.DatedEntriesGroup): + raise ValueError + else: + self.__parent = parent - self.__mood = self.set_mood(mood, known_moods) + # Processing other, optional properties + # --- + # Process activities self.__activities = [] + array = slice_quotes(activities).split(options.csv_delimiter) + if len(array) > 0: + for activity in array: + self.__activities.append(utils.slugify( + activity, + options.tag_activities + )) + + # Process title self.__title = None + if len(title) > 0: + self.__title = slice_quotes(title) + + # Process note self.__note = None - self.__parent = parent + if len(note) > 0: + self.__note = slice_quotes(note) def __bool__(self): # A DatedEntry is truthy only if it contains a healthy parent, time/uid and mood @@ -99,52 +215,15 @@ def __bool__(self): self.get_parent() ]) - def set_mood(self, mood, list_of_moods): - valid = False - for i, (_, this_group) in enumerate(list_of_moods.items()): - if mood in this_group: - valid = True - if not valid: - self.__logger.warning(ErrorMsg.print(ErrorMsg.INVALID_MOOD, mood)) - # Assign it anyway. Warning is enough. - self.__mood = mood - return True - def get_mood(self): return self.__mood - def set_activities(self, pipe_delimited_activity_string, delimiter, should_taggify): - array = slice_quotes(pipe_delimited_activity_string).split(delimiter) - if len(array) > 0: - for activity in array: - self.__activities.append(utils.slugify( - activity, - should_taggify - )) - return True - else: - return False - def get_activities(self): return self.__activities - def set_title(self, title): - if len(title) > 0: - self.__title = slice_quotes(title) - return True - else: - return False - def get_title(self): return self.__title - def set_note(self, note): - if len(note) > 0: - self.__note = slice_quotes(note) - return True - else: - return False - def get_note(self): return self.__note diff --git a/errors.py b/errors.py index 32dea7c..af2f90d 100644 --- a/errors.py +++ b/errors.py @@ -1,20 +1,43 @@ import logging + +# Formatter with fancy additions - colour and bold support - used in the console logger handler +class FancyFormatter(logging.Formatter): + grey = "\x1b[38;20m" + yellow = "\x1b[33;20m" + red = "\x1b[31;20m" + bold_red = "\x1b[31;1m" + reset = "\x1b[0m" + # TODO: seems like format does not apply, only colouring works + format = "%(asctime)s - %(name)s - %(levelname)s - %(message)s (%(filename)s:%(lineno)d)" + + FORMATS = { + logging.DEBUG: grey + format + reset, + logging.INFO: grey + format + reset, + logging.WARNING: yellow + format + reset, + logging.ERROR: red + format + reset, + logging.CRITICAL: bold_red + format + reset + } + + def format(self, record): + log_fmt = self.FORMATS.get(record.levelno) + formatter = logging.Formatter(log_fmt) + return formatter.format(record) + + # Common logging configuration for the root logger # noinspection SpellCheckingInspection -msg_format = "(%(asctime)s) %(name)s [%(levelname)s]: %(message)s" -logging.basicConfig(level=logging.DEBUG, format=msg_format) -formatter = logging.Formatter(msg_format) +logging.basicConfig(level=logging.DEBUG) # Create a file handler for the root logger file_log_handler = logging.FileHandler("debug.log") file_log_handler.setLevel(logging.DEBUG) -file_log_handler.setFormatter(formatter) +file_log_handler.setFormatter(FancyFormatter()) # Create a console handler for the root logger console_log_handler = logging.StreamHandler() console_log_handler.setLevel(logging.WARNING) -console_log_handler.setFormatter(formatter) +console_log_handler.setFormatter(FancyFormatter()) # Add the handlers to the root logger logging.getLogger().addHandler(file_log_handler) @@ -34,9 +57,17 @@ class ErrorMsgBase: WRONG_VALUE = "Received {}, expected {}." @staticmethod - def print(message, *args): + def print(message: str, *args: str) -> str | None: """ Insert the args into an error message. If the error message expects n variables, provide n arguments. Returns a string with the already filled out message. """ - return message.format(*args) + expected_args = message.count("{}") + + if len(args) != expected_args: + logging.getLogger(__name__).warning( + f"Expected {expected_args} arguments for \"{message}\", but got {len(args)} instead." + ) + return None + else: + return message.format(*args) diff --git a/librarian.py b/librarian.py index b92bbd8..f7e2805 100644 --- a/librarian.py +++ b/librarian.py @@ -6,18 +6,45 @@ Librarian knows their identity and can call upon them when needed to recite their contents back to the Librarian. Here's a quick breakdown of what is the specialisation of this file in the journaling process: -_ALL NOTES_ -> notes written on a particular date -> a particular note +└── ALL NOTES + └── notes written on a particular date + └── a particular note """ import csv import json import logging import sys -import config +from config import options import errors import utils from dated_entries_group import DatedEntriesGroup +# Adding Librarian-specific options in global_settings +librarian_settings = options.get_console().add_argument_group( + "Librarian", + "Handles main options" +) +# 1. Filepath is absolutely crucial to even start processing +librarian_settings.add_argument( + "filepath", + type=str, + help="Specify path to the .CSV file" +) +# 2. Destination is not needed if user is only processing, but no destination makes it impossible to output that data. +librarian_settings.add_argument( + "destination", + type=str, + help="Path to folder to output finished files into." +) +# TODO: Force-argument does nothing yet. +librarian_settings.add_argument( + "--force", + choices=["accept", "refuse"], + default=None, + help="Skips user confirmation when overwriting files and auto-accepts or auto-refuses all requests." +) + class ErrorMsg(errors.ErrorMsgBase): FILE_INCOMPLETE = "{} is incomplete." @@ -32,6 +59,8 @@ class ErrorMsg(errors.ErrorMsgBase): COUNT_ROWS = "Found {} rows of data in {}." +# Here's a quick reference what a "minimal viable" JSON there needs to be if you want to have custom mood-sets. +# If you do not pass a custom one, the application uses the following structure as a fallback mood-set. standard_mood_set = { "rad": ["rad"], "good": ["good"], @@ -42,26 +71,67 @@ class ErrorMsg(errors.ErrorMsgBase): class Librarian: + """ + Orchestrates the entire process of parsing CSV & passing data to objects specialised to handle it as a journal. + The chain of command looks like this: + + ``Librarian`` -> :class:`DatedEntriesGroup` -> :class:`DatedEntries` + + --- + + **How to process the CSV** + + User only needs to instantiate this object and pass the appropriate arguments. + The processing does not require invoking any other functions. Functions of this class are therefore mostly private. + + --- + + **How to output the journal** + + TODO: add missing documentation + """ + def __init__(self, - path_to_file, - path_to_moods=None, - custom_config=config.get_defaults()): + path_to_file: str, # the only crucial parameter at this stage + path_to_output: str = None, # TODO: `None` should block any outputting functions + path_to_moods: str = None): + """ + :param path_to_file: The path to the CSV file for processing. + :param path_to_output: The path for outputting processed data as markdown files. + If user does not provide the output path, no output functionality will work. + :param path_to_moods: The path for a custom mood set file. + """ + self.__known_moods = standard_mood_set self.__known_dates = {} self.__logger = logging.getLogger(self.__class__.__name__) + self.__destination = path_to_output - self.__pass_file(path_to_file, custom_config) + # Let's start processing the file + # --- + # 1. Parse the path_to_moods JSON to see if a custom mood-set has to be used self.__set_custom_moods(path_to_moods) - def has_custom_moods(self): + # 2. Access the CSV file and get all the rows with content + # then pass the data to specialised data objects that can handle them in a structured way + self.__process_file(path_to_file) + + def has_custom_moods(self) -> bool: + """ + Has any .JSON with custom moods been processed by this Librarian instance? + :return: yes if custom moods have been set, false otherwise + """ return self.__known_moods != standard_mood_set - def __set_custom_moods(self, json_file): + def __set_custom_moods(self, json_file: str) -> bool: """ - Overwrite the standard mood-set with a custom one. - Mood-sets are used in output formatting to colour-code the dated entries. - Mood-set is a dict with five keys: rad, good, neutral, bad and awful. - Each key holds an array of any number of strings indicating various moods. + Overwrite the standard mood-set with a custom one. Mood-sets are used in colour-coding each dated entry. + + :param json_file: path to the .JSON file with a non-standard mood set. + Should have five keys: ``rad``, ``good``, ``neutral``, ``bad`` and ``awful``. + Each of those keys should hold an array of any number of strings indicating various moods. + **Example**: ``[{"good": ["good"]},...]`` + :returns: success or failure to set """ exp_path = utils.expand_path(json_file) try: @@ -77,7 +147,7 @@ def __set_custom_moods(self, json_file): self.__logger.warning(ErrorMsg.print(ErrorMsg.DECODE_ERROR, exp_path)) return False - # Try accessing each mood key to raise KeyError if missing + # Try accessing each mood key to watch for KeyError if missing for mood_key in self.__known_moods.keys(): try: tmp_mood_set[mood_key] @@ -91,88 +161,120 @@ def __set_custom_moods(self, json_file): self.__known_moods = tmp_mood_set return True - def __pass_file(self, filepath, custom_config): + def __process_file(self, filepath: str) -> bool: """ - Open and parse filepath. Then structure it into Date & DatedEntry objects. + Validates CSV file and processes it into iterable rows. + + :param filepath: path to CSV to be read + :returns: True if parsed > 0, False otherwise + :except FileNotFoundError: exits immediately with code 1 + :except PermissionError: exists immediately with code 1 + :except OSError: exists immediately with code 1 """ if not self.has_custom_moods(): self.__logger.info(ErrorMsg.print(ErrorMsg.STANDARD_MOODS_USED)) + # Let's determine if the file can be opened + # --- try: file = open(filepath, newline='', encoding='UTF-8') + # File has not been found except FileNotFoundError: self.__logger.critical(ErrorMsg.print(ErrorMsg.FILE_MISSING, filepath)) sys.exit(1) # no point in continuing + # Insufficient permissions to access the file except PermissionError: self.__logger.critical(ErrorMsg.print(ErrorMsg.PERMISSION_ERROR, filepath)) sys.exit(1) # no point in continuing + # Other error that makes it impossible to access the file except OSError: self.__logger.critical(OSError) sys.exit(1) # no point in continuing - expected_fields = [ - "full_date", - "date", - "weekday", - "time", - "mood", - "activities", - "note", - "note_title", - "note" - ] + # If the code reaches here, the program can access the file. + # Now let's determine if the file's contents are actually usable + # --- with file: # Is it a valid CSV? try: # strict parameter throws csv.Error if parsing fails + # if the parsing fails, exit immediately raw_lines = csv.DictReader(file, delimiter=',', quotechar='"', strict=True) except csv.Error: self.__logger.critical(ErrorMsg.print(ErrorMsg.DECODE_ERROR, filepath)) sys.exit(1) - # Does it have all the fields? + # Does it have all the fields? Push any missing field into an array for later reference + # Even if only one column from the list below is missing in the CSV, exit immediately + expected_fields = [ + "full_date", + "date", + "weekday", + "time", + "mood", + "activities", + "note", + "note_title", + "note" + ] missing_strings = [expected_field for expected_field in expected_fields if expected_field not in raw_lines.fieldnames] + if not missing_strings: self.__logger.debug(ErrorMsg.print(ErrorMsg.CSV_ALL_FIELDS_PRESENT)) else: self.__logger.critical( ErrorMsg.print( ErrorMsg.CSV_FIELDS_MISSING, - {', '.join(missing_strings)} + ', '.join(missing_strings) # which ones are missing - e.g. "date, mood, note" ) ) sys.exit(1) # Does it have any rows besides the header? + # If the file is empty or only has column headers, exit immediately try: next(raw_lines) except StopIteration: self.__logger.critical(ErrorMsg.print(ErrorMsg.FILE_EMPTY, filepath)) sys.exit(1) - # Do any of the rows lack required fields? + # If the code has reached this point and has not exited, it means both file and contents have to be ok + # Processing + # --- lines_parsed = 0 for line in raw_lines: - line: dict[str] # fix parser incorrectly assuming type - if len(line) < 7: - self.__logger.warning(ErrorMsg.print(ErrorMsg.FILE_INCOMPLETE, line)) - else: - entry = self.access_date(line["full_date"]).access_dated_entry(line["time"]) - entry.set_mood(line["mood"], self.__known_moods) - entry.set_activities(line["activities"], custom_config.csv_delimiter, custom_config.tag_activities) - entry.set_title(line["note_title"]) - entry.set_note(line["note"]) - lines_parsed += 1 - self.__logger.info(ErrorMsg.print(ErrorMsg.COUNT_ROWS, lines_parsed, filepath)) - - return self - - def access_date(self, target_date): + line: dict[str] + lines_parsed += self.__process_line(line) + + self.__logger.info(ErrorMsg.print(ErrorMsg.COUNT_ROWS, str(lines_parsed), filepath)) + + # If at least one line has been parsed, the following return resolves to True + return bool(lines_parsed) + + # TODO: I guess it is more pythonic to raise exceptions than return False if I cannot complete the task + # https://eli.thegreenplace.net/2008/08/21/robust-exception-handling/ + def __process_line(self, line: dict[str]) -> bool: + """ + Goes row-by-row and passes the content to objects specialised in handling it from a journaling perspective. + :param line: a dictionary with values from the currently processed CSV line + :return: True if all columns had values for this CSV ``line``, False otherwise + """ + # Does each of the 8 columns have values for this row? + if len(line) < 8: + # Even if rows are missing some fields, continue parsing, but log it as a warning + self.__logger.warning(ErrorMsg.print(ErrorMsg.FILE_INCOMPLETE, str(line))) + return False + else: + # Let DatedEntriesGroup handle the rest and increment the counter (True == 1) + return self.access_date(line["full_date"]).create_dated_entry_from_row(line, known_moods=self.__known_moods) + + def access_date(self, target_date: str) -> DatedEntriesGroup: date_obj = DatedEntriesGroup(target_date) # have you already filed this date? + # TODO: maybe I should use a Date object instead of a string for comparison in the dict? if target_date in self.__known_dates: self.__logger.debug(ErrorMsg.print(ErrorMsg.OBJECT_FOUND, target_date)) else: diff --git a/main.py b/main.py index 1e99343..3ae21ef 100755 --- a/main.py +++ b/main.py @@ -1,34 +1,15 @@ """Parse a Daylio CSV into an Obsidian-compatible .MD file""" import logging -import os - -import config -import utils +from config import options from librarian import Librarian logger = logging.getLogger(__name__) -main_config = config.create_parser().parse_args() - -# SETTING THE EXPORT DIRECTORY -parsed_path = utils.expand_path(main_config.destination) -logger.info("Checking if destination %s exists...", parsed_path) -if not os.path.isdir(parsed_path): - os.makedirs(main_config.destination) - logger.info("Destination missing... Created") -else: - logger.info("Destination exists...") -Librarian( - path_to_file="_tests/sheet-1-valid-data.csv", - path_to_moods="moods.json", - custom_config=main_config) +# Compile global settings +# --- +# Read arguments from console and update the global_settings accordingly +options.get_console().parse_console() -# for day, entries in days.items(): -# note_contents = compile_md.compile_note_yaml() -# for current_entry in entries: -# note_contents += compile_md.compile_entry_contents(current_entry) -# current_note = write_md.Note(day, note_contents) -# try: -# current_note.create_file() -# except PermissionError: -# logger.debug("User refused to overwrite the file.") +# And now let's start processing +# --- +Librarian(path_to_file=options.settings.filepath) diff --git a/utils.py b/utils.py index 07e4859..4a13785 100755 --- a/utils.py +++ b/utils.py @@ -21,26 +21,32 @@ def __bool__(self): def __str__(self): return str(self.__uid) + # TODO: These are supposed to be pythonic setters, not this imitation def set_uid(self, value): self.__uid = value - return True def get_uid(self): return str(self.__uid) -def slugify(text, taggify): +class CustomException(Exception): + def __init__(self, message=None): + super().__init__(message) + self.message = message + + +def slugify(text: str, taggify: bool): # noinspection SpellCheckingInspection """ Simple slugification function to transform text. Works on non-latin characters too. """ logger = logging.getLogger(__name__) text = str(text).lower() - text = re.sub(re.compile(r"\s+"), '-', text) # Replace spaces with - - text = re.sub(re.compile(r"[^\w\-]+"), '', text) # Remove all non-word chars - text = re.sub(re.compile(r"--+"), '-', text) # Replace multiple - with single - - text = re.sub(re.compile(r"^-+"), '', text) # Trim - from start of text - text = re.sub(re.compile(r"-+$"), '', text) # Trim - from end of text + text = re.sub(re.compile(r"\s+"), '-', text) # Replace spaces with - + text = re.sub(re.compile(r"[^\w\-]+"), '', text) # Remove all non-word chars + text = re.sub(re.compile(r"--+"), '-', text) # Replace multiple - with single - + text = re.sub(re.compile(r"^-+"), '', text) # Trim - from start of text + text = re.sub(re.compile(r"-+$"), '', text) # Trim - from end of text if taggify: if re.match('[0-9]', text): logger.warning(ErrorMsg.print(ErrorMsg.INVALID_OBSIDIAN_TAGS, text)) From 6a8b190302cab71a05247a3a0b700a8d1a2e40de Mon Sep 17 00:00:00 2001 From: DeutscheGabanna <thetobruk@duck.com> Date: Fri, 5 Jan 2024 09:31:54 +0100 Subject: [PATCH 06/40] SettingsManager no longer has separate spoofed function for testing --- _tests/test_config.py | 166 ++++++++++++++++++++++++++++++------------ config.py | 36 ++------- 2 files changed, 128 insertions(+), 74 deletions(-) diff --git a/_tests/test_config.py b/_tests/test_config.py index d78f8d6..947d43a 100644 --- a/_tests/test_config.py +++ b/_tests/test_config.py @@ -1,57 +1,131 @@ from unittest import TestCase -from config import options + +import config class TestSettingsManager(TestCase): - def setUp(self) -> None: - self.librarian_settings = options.get_console().add_argument_group( - "Librarian", - "Handles main options" + def test_spoofed_keyword_option_without_equality_sign(self): + """ + In this case, SettingsManager can receive either "--force accept", "--force refuse" or None as prog arguments. + Check if it behaves properly with various user inputs as console arguments. + """ + # Setup + options_to_check = config.SettingsManager() + options_to_check.get_console().add_argument( + "--force", + choices=["accept", "refuse"], + default=None ) - self.librarian_settings.add_argument( - "filepath", - type=str, - help="Specify path to the .CSV file" - ) - self.librarian_settings.add_argument( - "destination", - default="", - type=str, - help="Path to folder to output finished files into." - ) - self.filepath_to_check = "_tests/sheet-1-valid-data.csv" - self.destination_to_check = "somewhere" - self.force_option_to_check = "accept" - - def test_spoofed_librarian_settings_without_equality_sign(self): - options.parse_spoofed_console([ - self.filepath_to_check, - self.destination_to_check, - "--force", self.force_option_to_check]) - self.assertEqual(options.settings.filepath, self.filepath_to_check) - self.assertEqual(options.settings.destination, self.destination_to_check) - self.assertEqual(options.settings.force, self.force_option_to_check) - - def test_spoofed_librarian_settings_with_equality_sign(self): - options.parse_spoofed_console([ - self.filepath_to_check, - self.destination_to_check, - "--force=" + self.force_option_to_check]) - self.assertEqual(options.settings.filepath, self.filepath_to_check) - self.assertEqual(options.settings.destination, self.destination_to_check) - self.assertEqual(options.settings.force, self.force_option_to_check) - def test_expected_failure_empty_argument_array(self): + # User input not in the dictionary of allowed options - should fail with self.assertRaises(SystemExit) as cm: - options.parse_spoofed_console([]) + options_to_check.parse_console(["--force", "yo-mama"]) self.assertEqual(cm.exception.code, 2, msg="Invalid arguments were passed to argparse so it should exit with 2") - def test_expected_failure_outside_of_dictionary(self): + # User input provided both options - should fail with self.assertRaises(SystemExit) as cm: - # noinspection SpellCheckingInspection - options.parse_spoofed_console( - [self.filepath_to_check, - self.destination_to_check, - "--force", - "yabadoo"]) + options_to_check.parse_console(["--force", "refuse", "accept"]) + self.assertEqual(cm.exception.code, 2, msg="Cannot both force-refuse and force-accept - should exit with 2") + + # User input correct - should pass + options_to_check.parse_console(["--force", "refuse"]) + options_to_check.parse_console(["--force", "accept"]) + + def test_spoofed_keyword_option_with_equality_sign(self): + """ + In this case, SettingsManager can receive either "--force=accept", "--force=refuse" or None as prog arguments. + Check if it behaves properly with various user inputs as console arguments. + """ + # Setup + options_to_check = config.SettingsManager() + options_to_check.get_console().add_argument( + "--force", + choices=["accept", "refuse"], + default=None + ) + + # User input not in the dictionary of allowed options - should fail + with self.assertRaises(SystemExit) as cm: + options_to_check.parse_console(["--force=yo-mama"]) self.assertEqual(cm.exception.code, 2, msg="Invalid arguments were passed to argparse so it should exit with 2") + + # User input provided both options - should fail + with self.assertRaises(SystemExit) as cm: + options_to_check.parse_console(["--force=refuse --force=accept"]) + self.assertEqual(cm.exception.code, 2, msg="Cannot both force-refuse and force-accept - should exit with 2") + + # User input correct - should pass + options_to_check.parse_console(["--force=refuse"]) + self.assertEqual(options_to_check.force, "refuse") + + # User input correct - should pass + options_to_check.parse_console(["--force=accept"]) + self.assertEqual(options_to_check.force, "accept") + + def test_check_if_required_arguments_passed(self): + # Setup + options_to_check = config.SettingsManager() + options_to_check.get_console().add_argument( + "filepath", + type=str + ) + options_to_check.get_console().add_argument( + "--optional_arg", + type=str + ) + + # User did not provide the required argument - should fail + with self.assertRaises(SystemExit) as cm: + options_to_check.parse_console(["--optional_arg", "haha"]) + print(options_to_check) + self.assertEqual(cm.exception.code, 2, msg="No filepath provided - should exit with 2") + + # User input correct - should pass + options_to_check.parse_console(["wakanda forever"]) + self.assertEqual(options_to_check.filepath, "wakanda forever") + + def test_expected_failure_empty_argument_array(self): + # Setup + options_to_check = config.SettingsManager() + options_to_check.get_console().add_argument( + "filepath", + type=str + ) + + # User provided no arguments whatsoever - should fail + with self.assertRaises(SystemExit) as cm: + options_to_check.parse_console([]) + self.assertEqual(cm.exception.code, 2, msg="No arguments provided to argparse so it should exit with 2") + + # TODO: test Namespace=self where SettingsManager overwrites its default attributes with argparse + def test_if_settings_manager_overwrites_its_properties_from_console(self): + """ + SettingsManager has default attributes as options already at instantiation. + This test case checks if the argparse can correctly overwrite these default attributes with its own. + """ + # Setup + options_to_check = config.SettingsManager() + options_to_check.filepath = "this is the default value" + + options_to_check.get_console().add_argument( + "filepath", + type=str + ) + options_to_check.get_console().add_argument( + "foo", + type=str, + help="this type of argument does not appear in the SettingsManager list of attributes at setup" + ) + options_to_check.get_console().add_argument( + "bar", + type=str, + help="this type of argument does not appear in the SettingsManager list of attributes at setup" + ) + + # User input correct - should pass + options_to_check.parse_console(["this is NOT the default value", "hello", "world"]) + self.assertEqual(options_to_check.filepath, "this is NOT the default value") + self.assertNotEquals(options_to_check.filepath, "this is the default value") + # because neither "foo" nor "bar" is part of the SettingsManager class, I need to access it like a key in dict + self.assertEqual(vars(options_to_check)["foo"], "hello") + self.assertEqual(vars(options_to_check)["bar"], "world") diff --git a/config.py b/config.py index c89a31a..5a6875a 100755 --- a/config.py +++ b/config.py @@ -23,7 +23,7 @@ """ import argparse import logging -from typing import List +from typing import List, Any # Logging for config library logger = logging.getLogger(__name__) @@ -43,11 +43,10 @@ def __init__(self): # --- # Librarian # --- + self.filepath = None + self.destination = None + # TODO: Force-argument does nothing yet. self.force = None - # if you're wondering about these two below - I'd rather omit here positional arguments like these - # it makes more sense for them to be passed as function arguments when initialising Librarian object - # self.filepath = "/_tests/sheet-1-valid-data.csv" - # self.destination = "/_tests/output_results/" # --- # Dated Entry # --- @@ -58,6 +57,7 @@ def __init__(self): self.suffix = '' self.tag_activities = True self.colour = True + # TODO: User should be able to set verbosity level in logging def get_console(self): """ @@ -66,9 +66,10 @@ def get_console(self): """ return self.__console_arguments - def parse_console(self): + def parse_console(self, args: List[Any]): """ Configures SettingsManager by accessing the console and retrieving the arguments used to run the script. + :param args: either console arguments from sys.argv or spoofed ones """ # namespace=self adds the properties to the SettingsManager obj, instead of creating a new Namespace obj # Without namespace=self @@ -81,25 +82,7 @@ def parse_console(self): # - SettingsManager # - foo = foo # - bar = bar - self.__console_arguments.parse_args(namespace=self) - - def parse_spoofed_console(self, spoofed_string_of_args: List[str]): - """ - Configures SettingsManager without accessing the console. Useful for testing purposes. Don't use it elsewhere. - :param spoofed_string_of_args: Set of strs with positional and optional arguments as if written in CMD. - """ - # namespace=self adds the properties to the SettingsManager obj, instead of creating a new Namespace obj - # Without namespace=self - # --- - # - SettingsManager - # - Namespace obj that holds actual settings - # - foo = foo - # - bar = bar - # With namespace=self - # - SettingsManager - # - foo = foo - # - bar = bar - self.__console_arguments.parse_args(args=spoofed_string_of_args, namespace=self) + self.__console_arguments.parse_args(args=args, namespace=self) # Global configuration @@ -112,6 +95,3 @@ def parse_spoofed_console(self, spoofed_string_of_args: List[str]): action='version', version='%(prog)s 3.0' ) - -# TODO: User should be able to set verbosity level in logging -# TODO: Force-argument does nothing yet. From aa8d8e772f3240284a12d86f392db600f5a885ce Mon Sep 17 00:00:00 2001 From: DeutscheGabanna <thetobruk@duck.com> Date: Fri, 5 Jan 2024 11:05:56 +0100 Subject: [PATCH 07/40] 1. made properly pythonic getters and setters in classes like utils.Core and dated_entry.DatedEntry 2. made use of custom exceptions rather than the standard ones 3. fixed test_dated_entry.py 4. __bool__ is no longer needed in DatedEntry class as it cannot be instantiated without those attributes being truthy 5. some functions previously only issued warnings, not they raise exceptions for further handling 6. some code refactoring 7. dated_entries_group.py now properly responds to exceptions raised by dated_entry.py --- _tests/test_dated_entries_group.py | 34 ++++---- _tests/test_dated_entry.py | 73 +++++++++++------ _tests/test_librarian.py | 15 ++-- dated_entries_group.py | 56 ++++++------- dated_entry.py | 96 +++++++++++----------- librarian.py | 125 ++++++++++++++++++++--------- main.py | 4 +- utils.py | 22 +++-- 8 files changed, 253 insertions(+), 172 deletions(-) diff --git a/_tests/test_dated_entries_group.py b/_tests/test_dated_entries_group.py index d5b2628..a07721a 100644 --- a/_tests/test_dated_entries_group.py +++ b/_tests/test_dated_entries_group.py @@ -7,9 +7,9 @@ def setUp(self): self.sample_date = DatedEntriesGroup("2011-10-10") def test_get_date(self): - self.assertEqual(DatedEntriesGroup("2023-10-15").get_uid(), "2023-10-15") - self.assertEqual(DatedEntriesGroup("2019-5-9").get_uid(), "2019-5-9") - self.assertEqual(DatedEntriesGroup("2023-11-25").get_uid(), "2023-11-25") + self.assertEqual(DatedEntriesGroup("2023-10-15").uid, "2023-10-15") + self.assertEqual(DatedEntriesGroup("2019-5-9").uid, "2019-5-9") + self.assertEqual(DatedEntriesGroup("2023-11-25").uid, "2023-11-25") self.assertRaises(ValueError, DatedEntriesGroup, "00-") self.assertRaises(ValueError, DatedEntriesGroup, "2199-32-32") @@ -48,17 +48,19 @@ def test_access_dated_entry(self): - former will raise ValueError if time is invalid - latter will raise KeyError if time is invalid """ - self.assertEqual(self.sample_date.access_dated_entry("10:00 AM").get_uid(), "10:00 AM") - self.assertEqual(self.sample_date.access_dated_entry("9:30 PM").get_uid(), "9:30 PM") + self.assertEqual(self.sample_date.access_dated_entry("10:00 AM").uid, "10:00 AM") + self.assertEqual(self.sample_date.access_dated_entry("9:30 PM").uid, "9:30 PM") # Test cases for 12-hour format self.assertRaises(ValueError, self.sample_date.access_dated_entry, "2: AM") # Invalid format self.assertRaises(ValueError, self.sample_date.access_dated_entry, "15:45 PM") # Invalid hour (more than 12) + # noinspection SpellCheckingInspection self.assertRaises(ValueError, self.sample_date.access_dated_entry, "11:30 XM") # Invalid meridiem indicator # Test cases for 24-hour format self.assertRaises(ValueError, self.sample_date.access_dated_entry, "25:15") # Invalid hour (more than 24) self.assertRaises(ValueError, self.sample_date.access_dated_entry, "14:45 PM") + # noinspection SpellCheckingInspection self.assertRaises(ValueError, self.sample_date.access_dated_entry, "03:20 XM") # Invalid meridiem indicator in 24-hour format @@ -83,13 +85,13 @@ def test_get_known_dated_entries(self): self.sample_date.access_dated_entry("12:12") self.sample_date.access_dated_entry("13:13") - self.assertEqual(self.sample_date.get_known_dated_entries()["11:11"].get_uid(), "11:11") - self.assertEqual(self.sample_date.get_known_dated_entries()["12:12"].get_uid(), "12:12") - self.assertEqual(self.sample_date.get_known_dated_entries()["13:13"].get_uid(), "13:13") + self.assertEqual(self.sample_date.known_entries_from_this_day["11:11"].uid, "11:11") + self.assertEqual(self.sample_date.known_entries_from_this_day["12:12"].uid, "12:12") + self.assertEqual(self.sample_date.known_entries_from_this_day["13:13"].uid, "13:13") - self.assertRaises(KeyError, lambda: self.sample_date.get_known_dated_entries()["23:00"]) - self.assertRaises(KeyError, lambda: self.sample_date.get_known_dated_entries()["9:30 PM"]) - self.assertRaises(KeyError, lambda: self.sample_date.get_known_dated_entries()["11:50 AM"]) + self.assertRaises(KeyError, lambda: self.sample_date.known_entries_from_this_day["23:00"]) + self.assertRaises(KeyError, lambda: self.sample_date.known_entries_from_this_day["9:30 PM"]) + self.assertRaises(KeyError, lambda: self.sample_date.known_entries_from_this_day["11:50 AM"]) def test_truthiness_of_dated_entries_group(self): """ @@ -112,15 +114,15 @@ def test_no_duplicate_entries_created(self): DatedEntriesGroup should return the already existing entry if it is known, instead of creating a duplicate. """ obj = self.sample_date.access_dated_entry("11:11") - obj.set_note("I already exist, see?") + obj.note = "I already exist, see?" - self.assertEqual(self.sample_date.access_dated_entry("11:11").get_note(), obj.get_note()) + self.assertEqual(self.sample_date.access_dated_entry("11:11").note, obj.note) def test_retrieve_known_entries(self): obj1 = self.sample_date.access_dated_entry("11:11") obj2 = self.sample_date.access_dated_entry("12:12") obj3 = self.sample_date.access_dated_entry("13:13") - self.assertEqual(self.sample_date.get_known_dated_entries()["11:11"], obj1) - self.assertEqual(self.sample_date.get_known_dated_entries()["12:12"], obj2) - self.assertEqual(self.sample_date.get_known_dated_entries()["13:13"], obj3) + self.assertEqual(self.sample_date.known_entries_from_this_day["11:11"], obj1) + self.assertEqual(self.sample_date.known_entries_from_this_day["12:12"], obj2) + self.assertEqual(self.sample_date.known_entries_from_this_day["13:13"], obj3) diff --git a/_tests/test_dated_entry.py b/_tests/test_dated_entry.py index 8a46bdd..021b813 100644 --- a/_tests/test_dated_entry.py +++ b/_tests/test_dated_entry.py @@ -1,32 +1,57 @@ from unittest import TestCase -import hack -import dated_entry +from dated_entry import Time, slice_quotes, DatedEntry, IsNotTimeError -class Test(TestCase): +class TestDatedEntryUtils(TestCase): def test_slice_quotes(self): - self.assertEqual(dated_entry.slice_quotes("\"test\""), "test") - self.assertEqual(dated_entry.slice_quotes("\"\""), "") + self.assertEqual(slice_quotes("\"test\""), "test") + self.assertEqual(slice_quotes("\"\""), "") - def test_is_time_valid(self): + +class TestTime(TestCase): + def test_try_creating_valid_times(self): # Valid time formats - self.assertTrue(dated_entry.Time("1:49 AM")) - self.assertTrue(dated_entry.Time("02:15 AM")) - self.assertTrue(dated_entry.Time("12:00 PM")) - self.assertTrue(dated_entry.Time("6:30 PM")) - self.assertTrue(dated_entry.Time("9:45 PM")) - self.assertTrue(dated_entry.Time("00:00 AM")) - self.assertTrue(dated_entry.Time("12:00 AM")) - self.assertTrue(dated_entry.Time("13:30")) - self.assertTrue(dated_entry.Time("9:45")) + self.assertTrue(Time("1:49 AM")) + self.assertTrue(Time("02:15 AM")) + self.assertTrue(Time("12:00 PM")) + self.assertTrue(Time("6:30 PM")) + self.assertTrue(Time("9:45 PM")) + self.assertTrue(Time("00:00 AM")) + self.assertTrue(Time("12:00 AM")) + self.assertTrue(Time("13:30")) + self.assertTrue(Time("9:45")) + def test_try_creating_invalid_times(self): # Invalid time formats - self.assertRaises(ValueError, dated_entry.Time, "okk:oksdf s") - self.assertRaises(ValueError, dated_entry.Time, "14:59 AM") - self.assertRaises(ValueError, dated_entry.Time, "25:00 AM") - self.assertRaises(ValueError, dated_entry.Time, "26:10") - self.assertRaises(ValueError, dated_entry.Time, "12:60 PM") - self.assertRaises(ValueError, dated_entry.Time, "12:00 XX") - self.assertRaises(ValueError, dated_entry.Time, "abc:def AM") - self.assertRaises(ValueError, dated_entry.Time, "24:00 PM") - self.assertRaises(ValueError, dated_entry.Time, "00:61 AM") + # noinspection SpellCheckingInspection + self.assertRaises(IsNotTimeError, Time, "okk:oksdf s") + self.assertRaises(IsNotTimeError, Time, "14:59 AM") + self.assertRaises(IsNotTimeError, Time, "25:00 AM") + self.assertRaises(IsNotTimeError, Time, "26:10") + self.assertRaises(IsNotTimeError, Time, "12:60 PM") + self.assertRaises(IsNotTimeError, Time, "12:00 XX") + self.assertRaises(IsNotTimeError, Time, "abc:def AM") + self.assertRaises(IsNotTimeError, Time, "24:00 PM") + self.assertRaises(IsNotTimeError, Time, "00:61 AM") + + +class TestDatedEntry(TestCase): + def test_bare_minimum_dated_entries(self): + # When + bare_minimum_dated_entry = DatedEntry( + time="1:49 AM", + mood="vaguely ok", + known_moods={ + "neutral": ["vaguely ok"] + } + ) + + # Then + self.assertTrue(bare_minimum_dated_entry.mood, "vaguely ok") + self.assertTrue(bare_minimum_dated_entry.uid, "1:49 AM") + self.assertIsNone(bare_minimum_dated_entry.title) + self.assertIsNone(bare_minimum_dated_entry.note) + self.assertTrue(bare_minimum_dated_entry.activities, []) + + def test_insufficient_dated_entries(self): + self.assertRaises(ValueError, DatedEntry, "2:00", mood="", known_moods={"neutral": ["vaguely ok"]}) diff --git a/_tests/test_librarian.py b/_tests/test_librarian.py index 61995f7..eea4efe 100644 --- a/_tests/test_librarian.py +++ b/_tests/test_librarian.py @@ -1,6 +1,8 @@ import csv import json from unittest import TestCase + +import librarian from librarian import Librarian @@ -12,13 +14,12 @@ class TestLibrarian(TestCase): """ def test_set_custom_moods(self): """ - Pass faulty moods and see if Librarian notices it does not know any custom moods while parsing. + Pass faulty moods and see if it fails as expected. """ - # assertTrue is not needed, because it would have already failed at setUp() - self.assertFalse(Librarian("sheet-2-corrupted-bytes.csv").has_custom_moods()) - self.assertFalse(Librarian("sheet-3-wrong-format.txt").has_custom_moods()) - self.assertFalse(Librarian("sheet-4-no-extension.csv").has_custom_moods()) - self.assertFalse(Librarian("incomplete-moods.json").has_custom_moods()) + self.assertRaises(librarian.CannotAccessFileError, Librarian("sheet-2-corrupted-bytes.csv")) + self.assertFalse(librarian.CannotAccessFileError, Librarian("sheet-3-wrong-format.txt")) + self.assertFalse(librarian.CannotAccessFileError, Librarian("sheet-4-no-extension.csv")) + self.assertFalse(librarian.CannotAccessFileError, Librarian("incomplete-moods.json")) def test_pass_file(self): """ @@ -68,7 +69,7 @@ def test_has_custom_moods(self): self.assertTrue(Librarian( path_to_file="sheet-1-valid-data.csv", path_to_moods="../moods.json" - ).has_custom_moods()) + ).custom_moods) self.assertFalse(Librarian("sheet-1-valid-data.csv")) self.assertRaises(json.JSONDecodeError, Librarian, "sheet-1-valid-data.csv", "empty_sheet.csv") self.assertRaises(FileNotFoundError, Librarian, "sheet-1-valid-data.csv", "missing-file.json") diff --git a/dated_entries_group.py b/dated_entries_group.py index fea45f9..c11dd55 100644 --- a/dated_entries_group.py +++ b/dated_entries_group.py @@ -17,19 +17,19 @@ class DatedEntryMissingError(utils.CustomException): - pass + """The :class:`DatedEntry` does not exist.""" class IncompleteDataRow(utils.CustomException): - pass + """Passed a row of data from CSV file that does not have all required fields.""" class InvalidDateError(utils.CustomException): - pass + """String is not a valid date. Check :class:`Date` for details.""" class TriedCreatingDuplicateDatedEntryError(utils.CustomException): - pass + """Tried to create object of :class:`DatedEntry` that would be a duplicate of one that already exists.""" class ErrorMsg(errors.ErrorMsgBase): @@ -79,40 +79,35 @@ class DatedEntriesGroup(utils.Core): Imagine it as a scribe, holding a stack of papers in his hand. The master Librarian knows each one of the scribes, including this one. However, the scribe knows only his papers. The papers contain all entries written that particular date. + + Truthy if it knows at least one :class:`DatedEntry` made on this :class:`Date`. + :raises ValueError: if the date string is deemed invalid by :class:`Date` """ def __init__(self, date): self.__logger = logging.getLogger(self.__class__.__name__) - super().__init__() try: - self.set_uid(Date(date)) + super().__init__(Date(date)) except InvalidDateError: - self.__logger.warning(ErrorMsg.print(ErrorMsg.WRONG_VALUE, date, "YYYY-MM-DD")) - raise ValueError + msg = ErrorMsg.print(ErrorMsg.WRONG_VALUE, date, "YYYY-MM-DD") + self.__logger.warning(msg) + raise ValueError(msg) else: - self.__hash = hash(self.get_uid()) - self.__known_dated_entries = {} + self.__hash = hash(self.uid) + self.__known_entries_for_this_date = {} def __hash__(self): return self.__hash - def __bool__(self): - """ - :return: ``True`` if itself has any :class:`DatedEntry` children - """ - return all(( - super().__bool__(), - len(self.__known_dated_entries) > 0 - )) - def create_dated_entry_from_row(self, line: dict[str], - known_moods: dict[List[str]] = None) -> dated_entry.DatedEntry: + known_moods: dict[str, List[str]] = None) -> dated_entry.DatedEntry: """ :func:`access_dated_entry` of :class:`DatedEntry` object with the specified parameters. :raises TriedCreatingDuplicateDatedEntryError: if it would result in making a duplicate :class:`DatedEntry` :raises IncompleteDataRow: if ``line`` does not have ``time`` and ``mood`` keys at the very least + :raises ValueError: re-raises ValueError from :class:`DatedEntry` :param line: a dictionary of strings. Required keys: mood, activities, note_title & note. :param known_moods: each key of the dict should have a set of strings containing moods. """ @@ -125,20 +120,23 @@ def create_dated_entry_from_row(self, raise IncompleteDataRow(key) # Check if there's already an object with this time - if line["time"] in self.__known_dated_entries: + if line["time"] in self.__known_entries_for_this_date: raise TriedCreatingDuplicateDatedEntryError - else: - # Instantiate the entry + + # Instantiate the entry + try: entry = dated_entry.DatedEntry( line["time"], line["mood"], - self, known_moods, activities=line["activities"], - title=line["title"], + title=line["note_title"], note=line["note"] ) - return entry + except ValueError: + raise ValueError + else: + return entry def access_dated_entry(self, time: str) -> DatedEntry: """ @@ -148,7 +146,7 @@ def access_dated_entry(self, time: str) -> DatedEntry: :returns: :class:`DatedEntry` """ try: - ref = self.__known_dated_entries[time] + ref = self.__known_entries_for_this_date[time] except KeyError: msg = ErrorMsg.print(ErrorMsg.OBJECT_NOT_FOUND, time) self.__logger.warning(msg) @@ -156,3 +154,7 @@ def access_dated_entry(self, time: str) -> DatedEntry: else: self.__logger.debug(ErrorMsg.print(ErrorMsg.OBJECT_FOUND, time)) return ref + + @property + def known_entries_from_this_day(self): + return self.__known_entries_for_this_date diff --git a/dated_entry.py b/dated_entry.py index 34fedc0..18153a9 100644 --- a/dated_entry.py +++ b/dated_entry.py @@ -10,7 +10,6 @@ from typing import Match from typing import List -import dated_entries_group from config import options import errors import utils @@ -63,11 +62,15 @@ ) +class IsNotTimeError(utils.CustomException): + """Expected a string in a valid time format - HH:MM with optional AM/PM suffix.""" + + def is_time_format_valid(string: str) -> Match[str] | None: """ Is the time format of :param:`str` valid? :param string: time to check - :return: True if :param:`str` follows the ``HH:MM`` format, with optional AM/PM appended + :return: ``True`` if :param:`str` follows the ``HH:MM`` format, with optional AM/PM appended, ``False`` otherwise """ return re.compile(r'^([0-1]?[0-9]|2[0-3]):[0-5][0-9]($|\sAM|\sPM)').match(string) @@ -76,7 +79,7 @@ def is_time_range_valid(string: str) -> bool: """ Is the time range of :param:`str` valid? :param string: time to check - :return: True if hour and minute ranges are both ok, False otherwise + :return: ``True`` if hour and minute ranges are both ok, ``False`` otherwise """ time_array = string.strip().split(':') @@ -98,7 +101,7 @@ def slice_quotes(string: str) -> str: :param string: string to be sliced :returns: string without quotation marks in the beginning and end of the initial string, even if it means empty str. """ - if len(string) > 2: + if string is not None and len(string) > 2: return string.strip("\"") # only 2 characters? Then it is an empty cell. return "" @@ -113,77 +116,86 @@ class Time: """ Hour and minutes of a particular moment in time. Validates the time string on instantiation. str(instance) returns the valid time in the ``HH:MM`` format. - :raises ValueError: if string is not a valid time in ``HH:MM`` format (either AM/PM or 24h) + :raises IsNotTimeError: if string is not a valid time in ``HH:MM`` format (either AM/PM or 24h) """ - def __init__(self, string): + def __init__(self, string: str): """ Upon instantiation checks if the time is valid. Used in :class:`DatedEntry` to create an instance of this class. - :raises ValueError: if string is not a valid time in ``HH:MM`` format with optional AM/PM appended + :raises IsNotTime: if string is not a valid time in ``HH:MM`` format with optional AM/PM appended :param string: time in ``HH:MM`` format - can have AM/PM appended """ self.__logger = logging.getLogger(self.__class__.__name__) + # OK if is_time_format_valid(string) and is_time_range_valid(string): time_array = string.strip().split(':') self.__hour = time_array[0] self.__minutes = time_array[1] + + # NOT OK else: msg_on_error = ErrorMsg.print(ErrorMsg.WRONG_VALUE, string, "HH:MM (AM/PM/)") self.__logger.warning(msg_on_error) - raise ValueError(msg_on_error) + raise IsNotTimeError(msg_on_error) - def __str__(self): + def __str__(self) -> str: + """ + :return: Outputs its hour and minutes attributes as a string in valid time format - HH:MM. + """ return ':'.join([self.__hour, self.__minutes]) class DatedEntry(utils.Core): """ - Journal entry made at a given moment in time, and describing a particular emotional state. - It inherits None uid from utils.Core which is then set to self.time. Object is unusable without uid. + Journal entry. + **A journal entry cannot exists without:** + + * Time it was written at, as :class:`Time` + * Mood, that is - a dominant emotional state during that particular moment in time. + + **Other, optional attributes:** + + * title + * note + * activities performed during or around this time + + :raises ValueError: if at least one of the required attributes cannot be set properly """ def __init__(self, time: str, mood: str, - parent: dated_entries_group.DatedEntriesGroup, - known_moods: dict[List[str]], + known_moods: dict[str, List[str]], activities: str = None, title: str = None, note: str = None): # TODO: have to test the whole instantiation function again after refactoring self.__logger = logging.getLogger(self.__class__.__name__) - super().__init__() # Processing required properties # --- # Time try: - self.set_uid(Time(time)) - except ValueError: - raise ValueError + super().__init__(Time(time)) + except IsNotTimeError: + raise ValueError("Cannot create object without valid Time attribute") # Mood - if len(mood) == 0: - raise ValueError + if len(mood) == 0 or mood is None: + raise ValueError("Cannot create object without valid Mood attribute") else: - is_mood_valid = False + mood_is_in_dictionary = False for i, (_, this_group) in enumerate(known_moods.items()): if mood in this_group: - is_mood_valid = True + mood_is_in_dictionary = True break - if not is_mood_valid: + if not mood_is_in_dictionary: self.__logger.warning(ErrorMsg.print(ErrorMsg.INVALID_MOOD, mood)) # Assign it anyway. Warning is enough. self.__mood = mood - # Parent - if not isinstance(parent, dated_entries_group.DatedEntriesGroup): - raise ValueError - else: - self.__parent = parent - # Processing other, optional properties # --- # Process activities @@ -198,34 +210,26 @@ def __init__(self, # Process title self.__title = None - if len(title) > 0: + if title is not None and len(title) > 0: self.__title = slice_quotes(title) # Process note self.__note = None - if len(note) > 0: + if note is not None and len(note) > 0: self.__note = slice_quotes(note) - def __bool__(self): - # A DatedEntry is truthy only if it contains a healthy parent, time/uid and mood - return all([ - super().__bool__(), - self.get_uid(), - self.get_mood(), - self.get_parent() - ]) - - def get_mood(self): + @property + def mood(self): return self.__mood - def get_activities(self): + @property + def activities(self): return self.__activities - def get_title(self): + @property + def title(self): return self.__title - def get_note(self): + @property + def note(self): return self.__note - - def get_parent(self): - return self.__parent diff --git a/librarian.py b/librarian.py index f7e2805..40237d8 100644 --- a/librarian.py +++ b/librarian.py @@ -14,6 +14,7 @@ import json import logging import sys +from typing import List from config import options import errors @@ -70,6 +71,20 @@ class ErrorMsg(errors.ErrorMsgBase): } +class MissingValuesInRowError(utils.CustomException): + """If a CSV row does not have enough values needed to create an entry.""" + + +class CannotAccessFileError(utils.CustomException): + """The file could not be accessed.""" + + +class InvalidDataInFileError(utils.CustomException): + """The file does not follow the expected structure.""" + + +# I've found a term that describes what this class does - it is a Director - even sounds similar to Librarian +# https://refactoring.guru/design-patterns/builder class Librarian: """ Orchestrates the entire process of parsing CSV & passing data to objects specialised to handle it as a journal. @@ -99,6 +114,7 @@ def __init__(self, :param path_to_file: The path to the CSV file for processing. :param path_to_output: The path for outputting processed data as markdown files. If user does not provide the output path, no output functionality will work. + :raises CannotAccessFileError: if any problems occur during accessing or decoding the CSV file. :param path_to_moods: The path for a custom mood set file. """ @@ -110,20 +126,30 @@ def __init__(self, # Let's start processing the file # --- # 1. Parse the path_to_moods JSON to see if a custom mood-set has to be used - self.__set_custom_moods(path_to_moods) + if path_to_moods is not None: + try: + self.__set_custom_moods(path_to_moods) + except CannotAccessFileError: + pass # 2. Access the CSV file and get all the rows with content # then pass the data to specialised data objects that can handle them in a structured way - self.__process_file(path_to_file) + try: + self.__process_file(path_to_file) + except CannotAccessFileError: + raise CannotAccessFileError - def has_custom_moods(self) -> bool: + @property + def custom_moods(self) -> dict[str, List[str]] | None: """ - Has any .JSON with custom moods been processed by this Librarian instance? - :return: yes if custom moods have been set, false otherwise + :returns: dictionary of rad, good, neutral, bad and awful moods that this Librarian instance knows about """ - return self.__known_moods != standard_mood_set + if self.__known_moods != standard_mood_set: + return self.__known_moods + else: + return None - def __set_custom_moods(self, json_file: str) -> bool: + def __set_custom_moods(self, json_file: str): """ Overwrite the standard mood-set with a custom one. Mood-sets are used in colour-coding each dated entry. @@ -131,6 +157,7 @@ def __set_custom_moods(self, json_file: str) -> bool: Should have five keys: ``rad``, ``good``, ``neutral``, ``bad`` and ``awful``. Each of those keys should hold an array of any number of strings indicating various moods. **Example**: ``[{"good": ["good"]},...]`` + :raises CannotAccessFileError: if any problems occur during accessing or decoding the JSON. :returns: success or failure to set """ exp_path = utils.expand_path(json_file) @@ -138,40 +165,41 @@ def __set_custom_moods(self, json_file: str) -> bool: with open(exp_path, encoding="UTF-8") as file: tmp_mood_set = json.load(file) except FileNotFoundError: - self.__logger.warning(ErrorMsg.print(ErrorMsg.FILE_MISSING, exp_path)) - return False + msg = ErrorMsg.print(ErrorMsg.FILE_MISSING, exp_path) + self.__logger.warning(msg) + raise CannotAccessFileError(msg) except PermissionError: - self.__logger.warning(ErrorMsg.print(ErrorMsg.PERMISSION_ERROR, exp_path)) - return False + msg = ErrorMsg.print(ErrorMsg.PERMISSION_ERROR, exp_path) + self.__logger.warning(msg) + raise CannotAccessFileError(msg) except json.JSONDecodeError: - self.__logger.warning(ErrorMsg.print(ErrorMsg.DECODE_ERROR, exp_path)) - return False + msg = ErrorMsg.print(ErrorMsg.DECODE_ERROR, exp_path) + self.__logger.warning(msg) + raise CannotAccessFileError(msg) # Try accessing each mood key to watch for KeyError if missing for mood_key in self.__known_moods.keys(): try: tmp_mood_set[mood_key] except KeyError: - self.__logger.warning(ErrorMsg.print(ErrorMsg.FILE_INCOMPLETE, exp_path)) - return False + msg = ErrorMsg.print(ErrorMsg.FILE_INCOMPLETE, exp_path) + self.__logger.warning(msg) + raise InvalidDataInFileError(msg) else: continue # At this point, we know each mood key is present so the dictionary is valid self.__known_moods = tmp_mood_set - return True def __process_file(self, filepath: str) -> bool: """ Validates CSV file and processes it into iterable rows. :param filepath: path to CSV to be read + :raises CannotAccessFileError: if any problems occur during accessing or decoding the CSV file. :returns: True if parsed > 0, False otherwise - :except FileNotFoundError: exits immediately with code 1 - :except PermissionError: exists immediately with code 1 - :except OSError: exists immediately with code 1 """ - if not self.has_custom_moods(): + if not self.custom_moods: self.__logger.info(ErrorMsg.print(ErrorMsg.STANDARD_MOODS_USED)) # Let's determine if the file can be opened @@ -180,16 +208,18 @@ def __process_file(self, filepath: str) -> bool: file = open(filepath, newline='', encoding='UTF-8') # File has not been found except FileNotFoundError: - self.__logger.critical(ErrorMsg.print(ErrorMsg.FILE_MISSING, filepath)) - sys.exit(1) # no point in continuing + msg = ErrorMsg.print(ErrorMsg.FILE_MISSING, filepath) + self.__logger.critical(msg) + raise CannotAccessFileError(msg) # Insufficient permissions to access the file except PermissionError: - self.__logger.critical(ErrorMsg.print(ErrorMsg.PERMISSION_ERROR, filepath)) - sys.exit(1) # no point in continuing + msg = ErrorMsg.print(ErrorMsg.PERMISSION_ERROR, filepath) + self.__logger.critical(msg) + raise CannotAccessFileError(msg) # Other error that makes it impossible to access the file except OSError: self.__logger.critical(OSError) - sys.exit(1) # no point in continuing + raise CannotAccessFileError # If the code reaches here, the program can access the file. # Now let's determine if the file's contents are actually usable @@ -202,8 +232,9 @@ def __process_file(self, filepath: str) -> bool: # if the parsing fails, exit immediately raw_lines = csv.DictReader(file, delimiter=',', quotechar='"', strict=True) except csv.Error: - self.__logger.critical(ErrorMsg.print(ErrorMsg.DECODE_ERROR, filepath)) - sys.exit(1) + msg = ErrorMsg.print(ErrorMsg.DECODE_ERROR, filepath) + self.__logger.critical(msg) + raise InvalidDataInFileError(msg) # Does it have all the fields? Push any missing field into an array for later reference # Even if only one column from the list below is missing in the CSV, exit immediately @@ -224,21 +255,21 @@ def __process_file(self, filepath: str) -> bool: if not missing_strings: self.__logger.debug(ErrorMsg.print(ErrorMsg.CSV_ALL_FIELDS_PRESENT)) else: - self.__logger.critical( - ErrorMsg.print( + msg = ErrorMsg.print( ErrorMsg.CSV_FIELDS_MISSING, ', '.join(missing_strings) # which ones are missing - e.g. "date, mood, note" - ) ) - sys.exit(1) + self.__logger.critical(msg) + raise InvalidDataInFileError(msg) # Does it have any rows besides the header? # If the file is empty or only has column headers, exit immediately try: next(raw_lines) except StopIteration: - self.__logger.critical(ErrorMsg.print(ErrorMsg.FILE_EMPTY, filepath)) - sys.exit(1) + msg = ErrorMsg.print(ErrorMsg.FILE_EMPTY, filepath) + self.__logger.critical(msg) + raise InvalidDataInFileError(msg) # If the code has reached this point and has not exited, it means both file and contents have to be ok # Processing @@ -246,7 +277,10 @@ def __process_file(self, filepath: str) -> bool: lines_parsed = 0 for line in raw_lines: line: dict[str] - lines_parsed += self.__process_line(line) + try: + lines_parsed += self.__process_line(line) + except MissingValuesInRowError: + pass self.__logger.info(ErrorMsg.print(ErrorMsg.COUNT_ROWS, str(lines_parsed), filepath)) @@ -254,23 +288,36 @@ def __process_file(self, filepath: str) -> bool: return bool(lines_parsed) # TODO: I guess it is more pythonic to raise exceptions than return False if I cannot complete the task + # TODO: this has to be tested # https://eli.thegreenplace.net/2008/08/21/robust-exception-handling/ def __process_line(self, line: dict[str]) -> bool: """ Goes row-by-row and passes the content to objects specialised in handling it from a journaling perspective. + :raises MissingValuesInRowError: if the row in CSV lacks enough commas to create 8 cells. It signals a problem. :param line: a dictionary with values from the currently processed CSV line :return: True if all columns had values for this CSV ``line``, False otherwise """ # Does each of the 8 columns have values for this row? if len(line) < 8: - # Even if rows are missing some fields, continue parsing, but log it as a warning - self.__logger.warning(ErrorMsg.print(ErrorMsg.FILE_INCOMPLETE, str(line))) - return False + # Oops, not enough values on this row, the file might be corrupted? + msg = ErrorMsg.print(ErrorMsg.FILE_INCOMPLETE, str(line)) + self.__logger.warning(msg) + raise MissingValuesInRowError(msg) else: # Let DatedEntriesGroup handle the rest and increment the counter (True == 1) - return self.access_date(line["full_date"]).create_dated_entry_from_row(line, known_moods=self.__known_moods) + try: + self.access_date(line["full_date"]).create_dated_entry_from_row(line, known_moods=self.__known_moods) + except: + return False + else: + return True def access_date(self, target_date: str) -> DatedEntriesGroup: + """ + Accesses an already existing or creates a new :class:`DatedEntriesGroup` for the specified ``target_date``. + :param target_date: the date for which a unique :class:`DatedEntriesGroup` object should be created or accessed. + :return: reference to :class:`DatedEntriesGroup` object + """ date_obj = DatedEntriesGroup(target_date) # have you already filed this date? @@ -279,6 +326,6 @@ def access_date(self, target_date: str) -> DatedEntriesGroup: self.__logger.debug(ErrorMsg.print(ErrorMsg.OBJECT_FOUND, target_date)) else: self.__logger.debug(ErrorMsg.print(ErrorMsg.OBJECT_NOT_FOUND, target_date)) - self.__known_dates[date_obj.get_uid()] = date_obj + self.__known_dates[date_obj.uid] = date_obj return date_obj diff --git a/main.py b/main.py index 3ae21ef..391a2e3 100755 --- a/main.py +++ b/main.py @@ -1,5 +1,7 @@ """Parse a Daylio CSV into an Obsidian-compatible .MD file""" import logging +import sys + from config import options from librarian import Librarian @@ -8,7 +10,7 @@ # Compile global settings # --- # Read arguments from console and update the global_settings accordingly -options.get_console().parse_console() +options.get_console().parse_console(sys.argv[1:]) # [1:] skips the program name, such as ["foo.py", ...] # And now let's start processing # --- diff --git a/utils.py b/utils.py index 4a13785..8ef7df3 100755 --- a/utils.py +++ b/utils.py @@ -12,8 +12,8 @@ class ErrorMsg(errors.ErrorMsgBase): class Core: - def __init__(self): - self.__uid = None + def __init__(self, uid): + self.__uid = uid def __bool__(self): return self.__uid is not None @@ -21,12 +21,9 @@ def __bool__(self): def __str__(self): return str(self.__uid) - # TODO: These are supposed to be pythonic setters, not this imitation - def set_uid(self, value): - self.__uid = value - - def get_uid(self): - return str(self.__uid) + @property + def uid(self): + return self.__uid class CustomException(Exception): @@ -58,8 +55,9 @@ def expand_path(path): Expand all %variables%, ~/home-directories and relative parts in the path. Return the expanded path. It does not use os.path.abspath() because it treats current script directory as root. """ - # Converts the filepath to an absolute path and then expands the tilde (~) character to the user's home directory - return os.path.expanduser( - # Expands environment variables in the path, such as %appdata% - os.path.expandvars(path) + # Gets full path, resolving things like ../ + return os.path.realpath( + # Expands the tilde (~) character to the user's home directory + os.path.expanduser(path) ) + From 870266c1f392a21d4c2dee9fc64b72b3818037d3 Mon Sep 17 00:00:00 2001 From: DeutscheGabanna <thetobruk@duck.com> Date: Sat, 6 Jan 2024 14:02:46 +0100 Subject: [PATCH 08/40] Librarian is now usable in testing --- _tests/test_dated_entries_group.py | 83 ++++++++++++----------- _tests/test_librarian.py | 105 +++++++++++++++++++---------- dated_entries_group.py | 1 + dated_entry.py | 17 ++--- errors.py | 61 ++++++++--------- librarian.py | 40 ++++++++--- main.py | 4 +- 7 files changed, 181 insertions(+), 130 deletions(-) diff --git a/_tests/test_dated_entries_group.py b/_tests/test_dated_entries_group.py index a07721a..e38f31e 100644 --- a/_tests/test_dated_entries_group.py +++ b/_tests/test_dated_entries_group.py @@ -1,15 +1,35 @@ from unittest import TestCase + +import dated_entries_group from dated_entries_group import DatedEntriesGroup class TestDate(TestCase): def setUp(self): self.sample_date = DatedEntriesGroup("2011-10-10") + self.sample_date.create_dated_entry_from_row( + { + "time": "10:00 AM", + "mood": "vaguely ok", + "activities": "", + "note_title": "", + "note": "" + } + ) + self.sample_date.create_dated_entry_from_row( + { + "time": "9:30 PM", + "mood": "awful", + "activities": "", + "note_title": "", + "note": "" + } + ) def test_get_date(self): - self.assertEqual(DatedEntriesGroup("2023-10-15").uid, "2023-10-15") - self.assertEqual(DatedEntriesGroup("2019-5-9").uid, "2019-5-9") - self.assertEqual(DatedEntriesGroup("2023-11-25").uid, "2023-11-25") + self.assertEqual(str(DatedEntriesGroup("2023-10-15")), "2023-10-15") + self.assertEqual(str(DatedEntriesGroup("2019-5-9")), "2019-5-9") + self.assertEqual(str(DatedEntriesGroup("2023-11-25")), "2023-11-25") self.assertRaises(ValueError, DatedEntriesGroup, "00-") self.assertRaises(ValueError, DatedEntriesGroup, "2199-32-32") @@ -48,30 +68,30 @@ def test_access_dated_entry(self): - former will raise ValueError if time is invalid - latter will raise KeyError if time is invalid """ - self.assertEqual(self.sample_date.access_dated_entry("10:00 AM").uid, "10:00 AM") - self.assertEqual(self.sample_date.access_dated_entry("9:30 PM").uid, "9:30 PM") + self.assertEqual(str(self.sample_date.access_dated_entry("10:00 AM")), "10:00 AM") + self.assertEqual(str(self.sample_date.access_dated_entry("9:30 PM")), "9:30 PM") # Test cases for 12-hour format - self.assertRaises(ValueError, self.sample_date.access_dated_entry, "2: AM") # Invalid format - self.assertRaises(ValueError, self.sample_date.access_dated_entry, "15:45 PM") # Invalid hour (more than 12) + self.assertRaises(dated_entries_group.DatedEntryMissingError, self.sample_date.access_dated_entry, "2: AM") # Invalid format + self.assertRaises(dated_entries_group.DatedEntryMissingError, self.sample_date.access_dated_entry, "15:45 PM") # Invalid hour (more than 12) # noinspection SpellCheckingInspection - self.assertRaises(ValueError, self.sample_date.access_dated_entry, "11:30 XM") # Invalid meridiem indicator + self.assertRaises(dated_entries_group.DatedEntryMissingError, self.sample_date.access_dated_entry, "11:30 XM") # Invalid meridiem indicator # Test cases for 24-hour format - self.assertRaises(ValueError, self.sample_date.access_dated_entry, "25:15") # Invalid hour (more than 24) - self.assertRaises(ValueError, self.sample_date.access_dated_entry, "14:45 PM") + self.assertRaises(dated_entries_group.DatedEntryMissingError, self.sample_date.access_dated_entry, "25:15") # Invalid hour (more than 24) + self.assertRaises(dated_entries_group.DatedEntryMissingError, self.sample_date.access_dated_entry, "14:45 PM") # noinspection SpellCheckingInspection - self.assertRaises(ValueError, self.sample_date.access_dated_entry, + self.assertRaises(dated_entries_group.DatedEntryMissingError, self.sample_date.access_dated_entry, "03:20 XM") # Invalid meridiem indicator in 24-hour format # Test cases with invalid characters # noinspection SpellCheckingInspection - self.assertRaises(ValueError, self.sample_date.access_dated_entry, "/ASDFVDJU\\") # Invalid characters + self.assertRaises(dated_entries_group.DatedEntryMissingError, self.sample_date.access_dated_entry, "/ASDFVDJU\\") # Invalid characters # Test cases with incomplete time information - self.assertRaises(ValueError, self.sample_date.access_dated_entry, "2022-1") # Incomplete time information - self.assertRaises(ValueError, self.sample_date.access_dated_entry, "12:") # Incomplete time information - self.assertRaises(ValueError, self.sample_date.access_dated_entry, ":30") # Incomplete time information + self.assertRaises(dated_entries_group.DatedEntryMissingError, self.sample_date.access_dated_entry, "2022-1") # Incomplete time information + self.assertRaises(dated_entries_group.DatedEntryMissingError, self.sample_date.access_dated_entry, "12:") # Incomplete time information + self.assertRaises(dated_entries_group.DatedEntryMissingError, self.sample_date.access_dated_entry, ":30") # Incomplete time information def test_get_known_dated_entries(self): """ @@ -81,48 +101,31 @@ def test_get_known_dated_entries(self): - former will raise ValueError if time is invalid - latter will raise KeyError if time is invalid """ - self.sample_date.access_dated_entry("11:11") - self.sample_date.access_dated_entry("12:12") - self.sample_date.access_dated_entry("13:13") - - self.assertEqual(self.sample_date.known_entries_from_this_day["11:11"].uid, "11:11") - self.assertEqual(self.sample_date.known_entries_from_this_day["12:12"].uid, "12:12") - self.assertEqual(self.sample_date.known_entries_from_this_day["13:13"].uid, "13:13") + self.assertEqual(str(self.sample_date.known_entries_from_this_day["9:30 PM"]), "9:30 PM") + self.assertEqual(str(self.sample_date.known_entries_from_this_day["10:00 AM"]), "10:00 AM") self.assertRaises(KeyError, lambda: self.sample_date.known_entries_from_this_day["23:00"]) - self.assertRaises(KeyError, lambda: self.sample_date.known_entries_from_this_day["9:30 PM"]) self.assertRaises(KeyError, lambda: self.sample_date.known_entries_from_this_day["11:50 AM"]) def test_truthiness_of_dated_entries_group(self): """ DatedEntriesGroup should be truthy if it has a valid UID and has any known entries. """ - self.sample_date.access_dated_entry("11:11") - self.sample_date.access_dated_entry("12:12") - self.sample_date.access_dated_entry("13:13") - - self.assertTrue(self.sample_date) + self.assertGreater(len(self.sample_date.known_entries_from_this_day), 0) def test_falseness_of_dated_entries_group(self): """ DatedEntriesGroup should be falsy if it has a valid UID but no known entries. """ - self.assertFalse(self.sample_date) + another_day = DatedEntriesGroup("2019-09-12") + self.assertEqual(len(another_day.known_entries_from_this_day), 0) + self.assertFalse(another_day.known_entries_from_this_day) def test_no_duplicate_entries_created(self): """ DatedEntriesGroup should return the already existing entry if it is known, instead of creating a duplicate. """ - obj = self.sample_date.access_dated_entry("11:11") - obj.note = "I already exist, see?" - - self.assertEqual(self.sample_date.access_dated_entry("11:11").note, obj.note) + pass def test_retrieve_known_entries(self): - obj1 = self.sample_date.access_dated_entry("11:11") - obj2 = self.sample_date.access_dated_entry("12:12") - obj3 = self.sample_date.access_dated_entry("13:13") - - self.assertEqual(self.sample_date.known_entries_from_this_day["11:11"], obj1) - self.assertEqual(self.sample_date.known_entries_from_this_day["12:12"], obj2) - self.assertEqual(self.sample_date.known_entries_from_this_day["13:13"], obj3) + pass diff --git a/_tests/test_librarian.py b/_tests/test_librarian.py index eea4efe..73d09ad 100644 --- a/_tests/test_librarian.py +++ b/_tests/test_librarian.py @@ -1,5 +1,3 @@ -import csv -import json from unittest import TestCase import librarian @@ -12,66 +10,99 @@ class TestLibrarian(TestCase): The Librarian is responsible for parsing files and outputting the final journal. We use internal class methods to check proper handling of data throughout the process. """ - def test_set_custom_moods(self): - """ - Pass faulty moods and see if it fails as expected. - """ - self.assertRaises(librarian.CannotAccessFileError, Librarian("sheet-2-corrupted-bytes.csv")) - self.assertFalse(librarian.CannotAccessFileError, Librarian("sheet-3-wrong-format.txt")) - self.assertFalse(librarian.CannotAccessFileError, Librarian("sheet-4-no-extension.csv")) - self.assertFalse(librarian.CannotAccessFileError, Librarian("incomplete-moods.json")) + def test_valid_journal_files(self): + self.assertTrue(Librarian("sheet-1-valid-data.csv")) - def test_pass_file(self): + def test_wrong_journal_files(self): """ - Pass some faulty files at the librarian and see if it exists. - There is no point in continuing the script if a crucial CSV file is faulty. + Pass faulty files and see if it fails as expected. """ + self.assertRaises(librarian.CannotAccessFileError, Librarian, "sheet-2-corrupted-bytes.csv") + self.assertRaises(librarian.CannotAccessFileError, Librarian, "sheet-3-wrong-format.txt") + self.assertRaises(librarian.CannotAccessFileError, Librarian, "sheet-4-no-extension.csv") + self.assertRaises(librarian.CannotAccessFileError, Librarian, "sheet-5-missing-file.csv") + self.assertRaises(librarian.CannotAccessFileError, Librarian, "sheet-6-empty-file.csv") + # TODO: maybe generate corrupted_sheet and wrong_format during runner setup in workflow mode? # dd if=/dev/urandom of="$corrupted_file" bs=1024 count=10 # generates random bytes and writes them into a given file - self.assertRaises(csv.Error, Librarian, "sheet-2-corrupted-bytes.csv") - self.assertRaises(csv.Error, Librarian, "sheet-3-wrong-format.txt") - self.assertRaises(csv.Error, Librarian, "sheet-4-no-extension") - self.assertRaises(FileNotFoundError, Librarian, "sheet-5-missing-file.csv") - self.assertRaises(StopIteration, Librarian, "sheet-6-empty-file.csv") + # TODO: make this file locked during runner workflow with chmod 600 - self.assertRaises(PermissionError, Librarian, "locked-dir/locked_file.csv") + self.assertRaises(librarian.CannotAccessFileError, Librarian, "locked-dir/locked_file.csv") + + def test_valid_custom_moods(self): + self.assertTrue(Librarian("sheet-1-valid-data.csv", "../moods.json")) - def test_access_date(self): + def test_wrong_custom_moods(self): """ - accessDate() should: - - return True if lib contains Date obj, and return obj - - return False if lib does not contain Date obj, and return empty obj - - throw ValueError if the string does not follow day format + Pass faulty moods and see if it fails as expected. + """ + self.assertRaises( + librarian.CannotAccessCustomMoodsError, + Librarian, "sheet-1-valid-data.csv", "_tests/output-results", "incomplete-moods.json" + ) + + def test_valid_access_dates(self): + """ + All the following dates exist in the sheet-1-valid-data.csv and should be accessible by ``lib``. """ + # When lib = Librarian( path_to_file="sheet-1-valid-data.csv", path_to_moods="../moods.json" ) - # obj is truthy if it has uid and at least one child DatedEntry (debatable) + + # Then self.assertTrue(lib.access_date("2022-10-25")) self.assertTrue(lib.access_date("2022-10-26")) self.assertTrue(lib.access_date("2022-10-27")) self.assertTrue(lib.access_date("2022-10-30")) - # obj is falsy if the object has no child DatedEntry (debatable) - self.assertRaises(FileNotFoundError, lib.access_date, "2022-10-21") - self.assertRaises(FileNotFoundError, lib.access_date, "2022-10-20") - self.assertRaises(FileNotFoundError, lib.access_date, "2017-10-20") - self.assertRaises(FileNotFoundError, lib.access_date, "1819-10-20") + def test_wrong_access_dates(self): + """ + **None** of the following dates exist in the sheet-1-valid-data.csv and should **NOT** be accessible by ``lib``. + """ + # When + lib = Librarian( + path_to_file="sheet-1-valid-data.csv", + path_to_moods="../moods.json" + ) + + # Then can access valid dates, even if they weren't in the file + self.assertTrue(lib.access_date("2022-10-21")) + self.assertTrue(lib.access_date("2022-10-20")) + self.assertTrue(lib.access_date("2022-10-2")) + self.assertTrue(lib.access_date("1999-10-22")) + + # But once I try to access the actual entries attached to those dates, they should be empty + self.assertFalse(lib.access_date("2022-10-21").known_entries_from_this_day) + self.assertFalse(lib.access_date("2022-10-20").known_entries_from_this_day) + self.assertFalse(lib.access_date("2022-10-2").known_entries_from_this_day) + self.assertFalse(lib.access_date("2022-10-22").known_entries_from_this_day) + self.assertFalse(lib.access_date("1999-1-1").known_entries_from_this_day) self.assertRaises(ValueError, lib.access_date, "ABC") self.assertRaises(ValueError, lib.access_date, "2022") - self.assertRaises(ValueError, lib.access_date, "1999-1-1") self.assertRaises(ValueError, lib.access_date, "12:00 AM") + self.assertRaises(ValueError, lib.access_date, "1795-12-05") # year range suspicious - def test_has_custom_moods(self): + def test_custom_moods_when_passed_correctly(self): self.assertTrue(Librarian( path_to_file="sheet-1-valid-data.csv", path_to_moods="../moods.json" ).custom_moods) - self.assertFalse(Librarian("sheet-1-valid-data.csv")) - self.assertRaises(json.JSONDecodeError, Librarian, "sheet-1-valid-data.csv", "empty_sheet.csv") - self.assertRaises(FileNotFoundError, Librarian, "sheet-1-valid-data.csv", "missing-file.json") - self.assertRaises(PermissionError, Librarian, "sheet-1-valid-data.csv", "locked-dir/locked_file.csv") - self.assertRaises(KeyError, Librarian, "sheet-1-valid-data.csv", "incomplete-moods.json") + + def test_custom_moods_when_not_passed(self): + self.assertFalse(Librarian( + path_to_file="sheet-1-valid-data.csv" + ).custom_moods) + + def test_custom_moods_when_json_invalid(self): + self.assertRaises(librarian.CannotAccessCustomMoodsError, + Librarian, "sheet-1-valid-data.csv", "_tests/output-results/", "empty_sheet.csv") + self.assertRaises(librarian.CannotAccessCustomMoodsError, + Librarian, "sheet-1-valid-data.csv", "_tests/output-results/", "missing-file.json") + self.assertRaises(librarian.CannotAccessCustomMoodsError, + Librarian, "sheet-1-valid-data.csv", "_tests/output-results/", "locked-dir/locked_file.csv") + self.assertRaises(librarian.CannotAccessCustomMoodsError, + Librarian, "sheet-1-valid-data.csv", "_tests/output-results/", "incomplete-moods.json") diff --git a/dated_entries_group.py b/dated_entries_group.py index c11dd55..f2afb97 100644 --- a/dated_entries_group.py +++ b/dated_entries_group.py @@ -136,6 +136,7 @@ def create_dated_entry_from_row(self, except ValueError: raise ValueError else: + self.__known_entries_for_this_date[str(entry.uid)] = entry return entry def access_dated_entry(self, time: str) -> DatedEntry: diff --git a/dated_entry.py b/dated_entry.py index 18153a9..3fdce94 100644 --- a/dated_entry.py +++ b/dated_entry.py @@ -186,14 +186,15 @@ def __init__(self, if len(mood) == 0 or mood is None: raise ValueError("Cannot create object without valid Mood attribute") else: - mood_is_in_dictionary = False - for i, (_, this_group) in enumerate(known_moods.items()): - if mood in this_group: - mood_is_in_dictionary = True - break - if not mood_is_in_dictionary: - self.__logger.warning(ErrorMsg.print(ErrorMsg.INVALID_MOOD, mood)) - # Assign it anyway. Warning is enough. + if known_moods is True: + mood_is_in_dictionary = False + for i, (_, this_group) in enumerate(known_moods.items()): + if mood in this_group: + mood_is_in_dictionary = True + break + if not mood_is_in_dictionary: + self.__logger.warning(ErrorMsg.print(ErrorMsg.INVALID_MOOD, mood)) + # Assign it anyway. Warning is enough. self.__mood = mood # Processing other, optional properties diff --git a/errors.py b/errors.py index af2f90d..a3107c5 100644 --- a/errors.py +++ b/errors.py @@ -1,46 +1,43 @@ import logging +import sys -# Formatter with fancy additions - colour and bold support - used in the console logger handler -class FancyFormatter(logging.Formatter): - grey = "\x1b[38;20m" - yellow = "\x1b[33;20m" - red = "\x1b[31;20m" - bold_red = "\x1b[31;1m" - reset = "\x1b[0m" - # TODO: seems like format does not apply, only colouring works - format = "%(asctime)s - %(name)s - %(levelname)s - %(message)s (%(filename)s:%(lineno)d)" +class ColorHandler(logging.StreamHandler): + # https://en.wikipedia.org/wiki/ANSI_escape_code#Colors + GRAY8 = "38;5;8" + GRAY7 = "38;5;7" + ORANGE = "33" + RED = "31" + WHITE = "0" - FORMATS = { - logging.DEBUG: grey + format + reset, - logging.INFO: grey + format + reset, - logging.WARNING: yellow + format + reset, - logging.ERROR: red + format + reset, - logging.CRITICAL: bold_red + format + reset - } + def emit(self, record): + # We don't use white for any logging, to help distinguish from user print statements + level_color_map = { + logging.DEBUG: self.GRAY8, + logging.INFO: self.GRAY7, + logging.WARNING: self.ORANGE, + logging.ERROR: self.RED, + logging.CRITICAL: f"1;{self.RED}", # Bold for critical errors + } - def format(self, record): - log_fmt = self.FORMATS.get(record.levelno) - formatter = logging.Formatter(log_fmt) - return formatter.format(record) + csi = f"{chr(27)}[" # control sequence introducer + color = level_color_map.get(record.levelno, self.WHITE) + # Apply the formatter to format the log message + formatted_msg = self.format(record) -# Common logging configuration for the root logger -# noinspection SpellCheckingInspection -logging.basicConfig(level=logging.DEBUG) + self.stream.write(f"{csi}{color}m{formatted_msg}{csi}m\n") -# Create a file handler for the root logger -file_log_handler = logging.FileHandler("debug.log") -file_log_handler.setLevel(logging.DEBUG) -file_log_handler.setFormatter(FancyFormatter()) # Create a console handler for the root logger -console_log_handler = logging.StreamHandler() -console_log_handler.setLevel(logging.WARNING) -console_log_handler.setFormatter(FancyFormatter()) +# noinspection SpellCheckingInspection +console_log_handler = ColorHandler(sys.stdout) +console_log_handler.setLevel(logging.DEBUG) + +formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s (%(filename)s:%(lineno)d)") +console_log_handler.setFormatter(formatter) # Add the handlers to the root logger -logging.getLogger().addHandler(file_log_handler) logging.getLogger().addHandler(console_log_handler) @@ -52,7 +49,7 @@ class ErrorMsgBase: """ # some common errors have been raised in scope into base class instead of child classes OBJECT_FOUND = "{}-class object found." - OBJECT_NOT_FOUND = "{} object not found. Creating and returning to caller." + OBJECT_NOT_FOUND = "{} object not found." FAULTY_OBJECT = "Called a {}-class object method but the object has been incorrectly instantiated." WRONG_VALUE = "Received {}, expected {}." diff --git a/librarian.py b/librarian.py index 40237d8..b2e45db 100644 --- a/librarian.py +++ b/librarian.py @@ -13,7 +13,6 @@ import csv import json import logging -import sys from typing import List from config import options @@ -56,7 +55,7 @@ class ErrorMsg(errors.ErrorMsgBase): DECODE_ERROR = "Error while decoding {}" NOT_A_FILE = "{} is not a file." CSV_ALL_FIELDS_PRESENT = "All expected columns are present in the CSV file columns." - CSV_FIELDS_MISSING = "The following expected columns are missing: " + CSV_FIELDS_MISSING = "The following expected columns are missing: {}" COUNT_ROWS = "Found {} rows of data in {}." @@ -79,6 +78,14 @@ class CannotAccessFileError(utils.CustomException): """The file could not be accessed.""" +class CannotAccessJournalError(CannotAccessFileError): + """The journal CSV could not be accessed or parsed.""" + + +class CannotAccessCustomMoodsError(CannotAccessFileError): + """The custom moods JSON could not be accessed or parsed.""" + + class InvalidDataInFileError(utils.CustomException): """The file does not follow the expected structure.""" @@ -129,15 +136,15 @@ def __init__(self, if path_to_moods is not None: try: self.__set_custom_moods(path_to_moods) - except CannotAccessFileError: - pass + except (CannotAccessFileError, InvalidDataInFileError): + raise CannotAccessCustomMoodsError # 2. Access the CSV file and get all the rows with content # then pass the data to specialised data objects that can handle them in a structured way try: self.__process_file(path_to_file) - except CannotAccessFileError: - raise CannotAccessFileError + except (CannotAccessFileError, InvalidDataInFileError): + raise CannotAccessJournalError @property def custom_moods(self) -> dict[str, List[str]] | None: @@ -196,7 +203,8 @@ def __process_file(self, filepath: str) -> bool: Validates CSV file and processes it into iterable rows. :param filepath: path to CSV to be read - :raises CannotAccessFileError: if any problems occur during accessing or decoding the CSV file. + :raises CannotAccessFileError: if any problems occur during accessing the CSV file. + :raises InvalidDataInFileError: if any problems occur during parsing the CSV file. :returns: True if parsed > 0, False otherwise """ if not self.custom_moods: @@ -249,15 +257,25 @@ def __process_file(self, filepath: str) -> bool: "note_title", "note" ] - missing_strings = [expected_field for expected_field in expected_fields if - expected_field not in raw_lines.fieldnames] + + # Let's have a look at what columns we have in the parsed CSV + # It seems that even files with random bytes occasionally pass through previous checks with no errors + # Therefore this 'try' block is also necessary, we do not know if the entire file is now fault-free + try: + missing_strings = [ + expected_field for expected_field in expected_fields if expected_field not in raw_lines.fieldnames + ] + except (csv.Error, UnicodeDecodeError): + msg = ErrorMsg.print(ErrorMsg.DECODE_ERROR, filepath) + self.__logger.critical(msg) + raise InvalidDataInFileError(msg) if not missing_strings: self.__logger.debug(ErrorMsg.print(ErrorMsg.CSV_ALL_FIELDS_PRESENT)) else: msg = ErrorMsg.print( - ErrorMsg.CSV_FIELDS_MISSING, - ', '.join(missing_strings) # which ones are missing - e.g. "date, mood, note" + ErrorMsg.CSV_FIELDS_MISSING, + ', '.join(missing_strings) # which ones are missing - e.g. "date, mood, note" ) self.__logger.critical(msg) raise InvalidDataInFileError(msg) diff --git a/main.py b/main.py index 391a2e3..a13285e 100755 --- a/main.py +++ b/main.py @@ -10,8 +10,8 @@ # Compile global settings # --- # Read arguments from console and update the global_settings accordingly -options.get_console().parse_console(sys.argv[1:]) # [1:] skips the program name, such as ["foo.py", ...] +options.parse_console(sys.argv[1:]) # [1:] skips the program name, such as ["foo.py", ...] # And now let's start processing # --- -Librarian(path_to_file=options.settings.filepath) +Librarian(path_to_file=options.filepath) From 1b57ec32176f005892f58ac02415896dea6c8ac4 Mon Sep 17 00:00:00 2001 From: DeutscheGabanna <thetobruk@duck.com> Date: Sat, 6 Jan 2024 16:38:38 +0100 Subject: [PATCH 09/40] deleted unnecessary imports from dated_entries_group.py --- dated_entries_group.py | 1 - 1 file changed, 1 deletion(-) diff --git a/dated_entries_group.py b/dated_entries_group.py index f2afb97..008583c 100644 --- a/dated_entries_group.py +++ b/dated_entries_group.py @@ -13,7 +13,6 @@ from typing import List from dated_entry import DatedEntry import utils -from config import options class DatedEntryMissingError(utils.CustomException): From d93bf0ad2c272674084549bfe1c4905a614852b0 Mon Sep 17 00:00:00 2001 From: DeutscheGabanna <thetobruk@duck.com> Date: Sat, 6 Jan 2024 16:56:43 +0100 Subject: [PATCH 10/40] deleted __hash - superfluous if you can call hash() dunder function anyway --- dated_entries_group.py | 15 ++++++++------- utils.py | 3 +++ 2 files changed, 11 insertions(+), 7 deletions(-) diff --git a/dated_entries_group.py b/dated_entries_group.py index 008583c..2c42b1f 100644 --- a/dated_entries_group.py +++ b/dated_entries_group.py @@ -45,7 +45,7 @@ def __init__(self, string: str): :raises InvalidDateError: if :param:`string` is not a valid date (for example the month number > 12) :param string: on which entries have been created (`YYYY-MM-DD`) """ - self.__logger = logging.getLogger(self.__class__.__name__) + # self.__logger = logging.getLogger(self.__class__.__name__) # does it have a valid format YYYY-MM-DD valid_date_pattern = re.compile(r'^\d{4}-\d{1,2}-\d{1,2}$') @@ -80,25 +80,26 @@ class DatedEntriesGroup(utils.Core): However, the scribe knows only his papers. The papers contain all entries written that particular date. Truthy if it knows at least one :class:`DatedEntry` made on this :class:`Date`. - :raises ValueError: if the date string is deemed invalid by :class:`Date` + :raises InvalidDateError: if the date string is deemed invalid by :class:`Date` """ def __init__(self, date): self.__logger = logging.getLogger(self.__class__.__name__) + # Try parsing the date and assigning it as your identification (uid) try: super().__init__(Date(date)) + + # Date is no good? except InvalidDateError: msg = ErrorMsg.print(ErrorMsg.WRONG_VALUE, date, "YYYY-MM-DD") self.__logger.warning(msg) - raise ValueError(msg) + raise InvalidDateError(msg) + + # All good - initialise else: - self.__hash = hash(self.uid) self.__known_entries_for_this_date = {} - def __hash__(self): - return self.__hash - def create_dated_entry_from_row(self, line: dict[str], known_moods: dict[str, List[str]] = None) -> dated_entry.DatedEntry: diff --git a/utils.py b/utils.py index 8ef7df3..594f1b0 100755 --- a/utils.py +++ b/utils.py @@ -21,6 +21,9 @@ def __bool__(self): def __str__(self): return str(self.__uid) + def __hash__(self): + return hash(self.uid) + @property def uid(self): return self.__uid From fa0a2dfc0f7349247196778305f10ba833356608 Mon Sep 17 00:00:00 2001 From: DeutscheGabanna <thetobruk@duck.com> Date: Sat, 6 Jan 2024 17:23:02 +0100 Subject: [PATCH 11/40] added more test scenarios for test_dated_entries_group.py --- _tests/test_dated_entries_group.py | 111 ++++++++++++++++++----------- dated_entries_group.py | 7 ++ 2 files changed, 77 insertions(+), 41 deletions(-) diff --git a/_tests/test_dated_entries_group.py b/_tests/test_dated_entries_group.py index e38f31e..bff525a 100644 --- a/_tests/test_dated_entries_group.py +++ b/_tests/test_dated_entries_group.py @@ -1,12 +1,15 @@ from unittest import TestCase import dated_entries_group -from dated_entries_group import DatedEntriesGroup +from dated_entries_group import DatedEntriesGroup, InvalidDateError, \ + DatedEntryMissingError, TriedCreatingDuplicateDatedEntryError class TestDate(TestCase): def setUp(self): + # Create a sample date self.sample_date = DatedEntriesGroup("2011-10-10") + # Append two sample entries to that day self.sample_date.create_dated_entry_from_row( { "time": "10:00 AM", @@ -26,72 +29,98 @@ def setUp(self): } ) - def test_get_date(self): - self.assertEqual(str(DatedEntriesGroup("2023-10-15")), "2023-10-15") - self.assertEqual(str(DatedEntriesGroup("2019-5-9")), "2019-5-9") - self.assertEqual(str(DatedEntriesGroup("2023-11-25")), "2023-11-25") + def test_creating_entries_from_row(self): + """ + Test whether you can successfully create :class:`DatedEntry` objects from this builder class. + """ + my_date = DatedEntriesGroup("1999-05-07") + my_date.create_dated_entry_from_row( + { + "time": "10:00 AM", + "mood": "vaguely ok", + "activities": "", + "note_title": "", + "note": "" + } + ) + with self.assertRaises(TriedCreatingDuplicateDatedEntryError): + my_date.create_dated_entry_from_row( + { + "time": "10:00 AM", + "mood": "vaguely ok", + "activities": "", + "note_title": "", + "note": "" + } + ) + + def test_create_dated_entries_groups(self): + """ + Try to instantiate an object of :class:`DatedEntriesGroup` with either valid or invalid dates + """ + self.assertEqual(DatedEntriesGroup("2023-10-15").date, "2023-10-15") + self.assertEqual(DatedEntriesGroup("2019-5-9").date, "2019-5-9") + self.assertEqual(DatedEntriesGroup("2023-11-25").date, "2023-11-25") - self.assertRaises(ValueError, DatedEntriesGroup, "00-") - self.assertRaises(ValueError, DatedEntriesGroup, "2199-32-32") + self.assertRaises(InvalidDateError, DatedEntriesGroup, "00-") + self.assertRaises(InvalidDateError, DatedEntriesGroup, "2199-32-32") # Test cases with unconventional date formats - self.assertRaises(ValueError, DatedEntriesGroup, "2022/05/18") # Invalid separator - self.assertRaises(ValueError, DatedEntriesGroup, "2023_07_12") # Invalid separator - self.assertRaises(ValueError, DatedEntriesGroup, "1999.10.25") # Invalid separator + self.assertRaises(InvalidDateError, DatedEntriesGroup, "2022/05/18") # Invalid separator + self.assertRaises(InvalidDateError, DatedEntriesGroup, "2023_07_12") # Invalid separator + self.assertRaises(InvalidDateError, DatedEntriesGroup, "1999.10.25") # Invalid separator # Test cases with random characters in the date string - self.assertRaises(ValueError, DatedEntriesGroup, "2@#0$2-05-18") # Special characters in the year - self.assertRaises(ValueError, DatedEntriesGroup, "1987-0%4-12") # Special characters in the month - self.assertRaises(ValueError, DatedEntriesGroup, "2001-07-3*") # Special characters in the day + self.assertRaises(InvalidDateError, DatedEntriesGroup, "2@#0$2-05-18") # Special characters in the year + self.assertRaises(InvalidDateError, DatedEntriesGroup, "1987-0%4-12") # Special characters in the month + self.assertRaises(InvalidDateError, DatedEntriesGroup, "2001-07-3*") # Special characters in the day # Test cases with excessive spaces - self.assertRaises(ValueError, DatedEntriesGroup, " 2022-05-18 ") # Spaces around the date - self.assertRaises(ValueError, DatedEntriesGroup, "1999- 10-25") # Space after the month - self.assertRaises(ValueError, DatedEntriesGroup, " 2000-04 - 12 ") # Spaces within the date + self.assertRaises(InvalidDateError, DatedEntriesGroup, " 2022-05-18 ") # Spaces around the date + self.assertRaises(InvalidDateError, DatedEntriesGroup, "1999- 10-25") # Space after the month + self.assertRaises(InvalidDateError, DatedEntriesGroup, " 2000-04 - 12 ") # Spaces within the date # Test cases with mixed characters and numbers - self.assertRaises(ValueError, DatedEntriesGroup, "2k20-05-18") # Non-numeric characters in the year - self.assertRaises(ValueError, DatedEntriesGroup, "1999-0ne-25") # Non-numeric characters in the month - self.assertRaises(ValueError, DatedEntriesGroup, "2021-07-Two") # Non-numeric characters in the day + self.assertRaises(InvalidDateError, DatedEntriesGroup, "2k20-05-18") # Non-numeric characters in the year + self.assertRaises(InvalidDateError, DatedEntriesGroup, "1999-0ne-25") # Non-numeric characters in the month + self.assertRaises(InvalidDateError, DatedEntriesGroup, "2021-07-Two") # Non-numeric characters in the day # Test cases with missing parts of the date - self.assertRaises(ValueError, DatedEntriesGroup, "2022-05") # Missing day - self.assertRaises(ValueError, DatedEntriesGroup, "1987-09") # Missing day - self.assertRaises(ValueError, DatedEntriesGroup, "2001") # Missing month and day - self.assertRaises(ValueError, DatedEntriesGroup, "") # Empty string + self.assertRaises(InvalidDateError, DatedEntriesGroup, "2022-05") # Missing day + self.assertRaises(InvalidDateError, DatedEntriesGroup, "1987-09") # Missing day + self.assertRaises(InvalidDateError, DatedEntriesGroup, "2001") # Missing month and day + self.assertRaises(InvalidDateError, DatedEntriesGroup, "") # Empty string def test_access_dated_entry(self): """ - Difference between access_dated_entry(time) and get_known_dated_entries[time]: - - former will create missing entries, if time is valid - - latter will raise KeyError if the entry is missing, even if time is valid - - former will raise ValueError if time is invalid - - latter will raise KeyError if time is invalid + Uses the :class:`DatedEntryGroup` object from :func:`setUp` with sample entries. + Tries to either access existing entries through :func:`access_dated_entry` or missing ones. + Expected behaviour is for the :class:`DatedEntryGroup` to return the entry object if exists or raise exception. """ self.assertEqual(str(self.sample_date.access_dated_entry("10:00 AM")), "10:00 AM") self.assertEqual(str(self.sample_date.access_dated_entry("9:30 PM")), "9:30 PM") # Test cases for 12-hour format - self.assertRaises(dated_entries_group.DatedEntryMissingError, self.sample_date.access_dated_entry, "2: AM") # Invalid format - self.assertRaises(dated_entries_group.DatedEntryMissingError, self.sample_date.access_dated_entry, "15:45 PM") # Invalid hour (more than 12) + self.assertRaises(DatedEntryMissingError, self.sample_date.access_dated_entry, "2: AM") # <- no minutes + self.assertRaises(DatedEntryMissingError, self.sample_date.access_dated_entry, "15:45 PM") # <- above 12h + self.assertRaises(DatedEntryMissingError, self.sample_date.access_dated_entry, "14:45 PM") # <- above 12h # noinspection SpellCheckingInspection - self.assertRaises(dated_entries_group.DatedEntryMissingError, self.sample_date.access_dated_entry, "11:30 XM") # Invalid meridiem indicator + self.assertRaises(DatedEntryMissingError, self.sample_date.access_dated_entry, "11:30 XM") # <- wrong meridiem + # noinspection SpellCheckingInspection + self.assertRaises(DatedEntryMissingError, self.sample_date.access_dated_entry, "03:20 XM") # <- wrong meridiem # Test cases for 24-hour format - self.assertRaises(dated_entries_group.DatedEntryMissingError, self.sample_date.access_dated_entry, "25:15") # Invalid hour (more than 24) - self.assertRaises(dated_entries_group.DatedEntryMissingError, self.sample_date.access_dated_entry, "14:45 PM") - # noinspection SpellCheckingInspection - self.assertRaises(dated_entries_group.DatedEntryMissingError, self.sample_date.access_dated_entry, - "03:20 XM") # Invalid meridiem indicator in 24-hour format + self.assertRaises(DatedEntryMissingError, self.sample_date.access_dated_entry, "25:15") # <- above 24h + self.assertRaises(DatedEntryMissingError, self.sample_date.access_dated_entry, "11:78") # <- above 59m # Test cases with invalid characters # noinspection SpellCheckingInspection - self.assertRaises(dated_entries_group.DatedEntryMissingError, self.sample_date.access_dated_entry, "/ASDFVDJU\\") # Invalid characters + self.assertRaises(DatedEntryMissingError, self.sample_date.access_dated_entry, "/ASDFVDJU\\") - # Test cases with incomplete time information - self.assertRaises(dated_entries_group.DatedEntryMissingError, self.sample_date.access_dated_entry, "2022-1") # Incomplete time information - self.assertRaises(dated_entries_group.DatedEntryMissingError, self.sample_date.access_dated_entry, "12:") # Incomplete time information - self.assertRaises(dated_entries_group.DatedEntryMissingError, self.sample_date.access_dated_entry, ":30") # Incomplete time information + # Other test cases with incomplete time information + self.assertRaises(DatedEntryMissingError, self.sample_date.access_dated_entry, "2022-1") + self.assertRaises(DatedEntryMissingError, self.sample_date.access_dated_entry, "12:") + self.assertRaises(DatedEntryMissingError, self.sample_date.access_dated_entry, ":30") def test_get_known_dated_entries(self): """ diff --git a/dated_entries_group.py b/dated_entries_group.py index 2c42b1f..eda64f6 100644 --- a/dated_entries_group.py +++ b/dated_entries_group.py @@ -159,3 +159,10 @@ def access_dated_entry(self, time: str) -> DatedEntry: @property def known_entries_from_this_day(self): return self.__known_entries_for_this_date + + @property + def date(self): + """ + :return: String in the format of YYYY-MM-DD that identifies this specific object of :class:`DatedEntryGroup`. + """ + return str(self) From 80793bf98f92ee845e4cd9159e0c6b7ad86b2e15 Mon Sep 17 00:00:00 2001 From: DeutscheGabanna <thetobruk@duck.com> Date: Sat, 6 Jan 2024 23:39:20 +0100 Subject: [PATCH 12/40] detached moods into a specialised class --- .idea/Obsidian-Daylio-Parser.iml | 2 +- .../inspectionProfiles/profiles_settings.xml | 1 + .idea/misc.xml | 5 +- _tests/test_dated_entries_group.py | 16 +- _tests/test_dated_entry.py | 2 +- _tests/test_librarian.py | 50 ++-- _tests/test_mood.py | 217 +++++++++++++++ dated_entries_group.py | 72 ++++- dated_entry.py | 68 +++-- entry/mood.py | 250 ++++++++++++++++++ errors.py | 6 +- librarian.py | 157 +++++------ 12 files changed, 708 insertions(+), 138 deletions(-) create mode 100644 _tests/test_mood.py create mode 100644 entry/mood.py diff --git a/.idea/Obsidian-Daylio-Parser.iml b/.idea/Obsidian-Daylio-Parser.iml index 5b0c423..a2dca91 100644 --- a/.idea/Obsidian-Daylio-Parser.iml +++ b/.idea/Obsidian-Daylio-Parser.iml @@ -6,7 +6,7 @@ <excludeFolder url="file://$MODULE_DIR$/_tests/locked-dir" /> <excludeFolder url="file://$MODULE_DIR$/_tests/output-results" /> </content> - <orderEntry type="jdk" jdkName="$USER_HOME$/miniconda3" jdkType="Python SDK" /> + <orderEntry type="inheritedJdk" /> <orderEntry type="sourceFolder" forTests="false" /> </component> <component name="PackageRequirementsSettings"> diff --git a/.idea/inspectionProfiles/profiles_settings.xml b/.idea/inspectionProfiles/profiles_settings.xml index 105ce2d..dd4c951 100644 --- a/.idea/inspectionProfiles/profiles_settings.xml +++ b/.idea/inspectionProfiles/profiles_settings.xml @@ -1,5 +1,6 @@ <component name="InspectionProjectProfileManager"> <settings> + <option name="PROJECT_PROFILE" value="Default" /> <option name="USE_PROJECT_PROFILE" value="false" /> <version value="1.0" /> </settings> diff --git a/.idea/misc.xml b/.idea/misc.xml index 4e9c2aa..2a75f48 100644 --- a/.idea/misc.xml +++ b/.idea/misc.xml @@ -1,4 +1,7 @@ <?xml version="1.0" encoding="UTF-8"?> <project version="4"> - <component name="ProjectRootManager" version="2" project-jdk-name="$PROJECT_DIR$/../miniconda3" project-jdk-type="Python SDK" /> + <component name="ProjectRootManager" version="2" project-jdk-name="Python 3.8" project-jdk-type="Python SDK" /> + <component name="PythonCompatibilityInspectionAdvertiser"> + <option name="version" value="3" /> + </component> </project> \ No newline at end of file diff --git a/_tests/test_dated_entries_group.py b/_tests/test_dated_entries_group.py index bff525a..b6d475d 100644 --- a/_tests/test_dated_entries_group.py +++ b/_tests/test_dated_entries_group.py @@ -1,8 +1,8 @@ from unittest import TestCase -import dated_entries_group from dated_entries_group import DatedEntriesGroup, InvalidDateError, \ - DatedEntryMissingError, TriedCreatingDuplicateDatedEntryError + DatedEntryMissingError, TriedCreatingDuplicateDatedEntryError, \ + IncompleteDataRow class TestDate(TestCase): @@ -43,6 +43,7 @@ def test_creating_entries_from_row(self): "note": "" } ) + # This would be a duplicate from the one already created with self.assertRaises(TriedCreatingDuplicateDatedEntryError): my_date.create_dated_entry_from_row( { @@ -53,6 +54,17 @@ def test_creating_entries_from_row(self): "note": "" } ) + # This lacks the minimum required keys - time and mood - to function correctly + with self.assertRaises(IncompleteDataRow): + my_date.create_dated_entry_from_row( + { + "time": "5:00 PM", + "mood": "", + "activities": "", + "note_title": "", + "note": "" + } + ) def test_create_dated_entries_groups(self): """ diff --git a/_tests/test_dated_entry.py b/_tests/test_dated_entry.py index 021b813..710053a 100644 --- a/_tests/test_dated_entry.py +++ b/_tests/test_dated_entry.py @@ -41,7 +41,7 @@ def test_bare_minimum_dated_entries(self): bare_minimum_dated_entry = DatedEntry( time="1:49 AM", mood="vaguely ok", - known_moods={ + override_mood_set={ "neutral": ["vaguely ok"] } ) diff --git a/_tests/test_librarian.py b/_tests/test_librarian.py index 73d09ad..6f9d1c9 100644 --- a/_tests/test_librarian.py +++ b/_tests/test_librarian.py @@ -10,10 +10,10 @@ class TestLibrarian(TestCase): The Librarian is responsible for parsing files and outputting the final journal. We use internal class methods to check proper handling of data throughout the process. """ - def test_valid_journal_files(self): + def test_init_valid_csv(self): self.assertTrue(Librarian("sheet-1-valid-data.csv")) - def test_wrong_journal_files(self): + def test_init_invalid_csv(self): """ Pass faulty files and see if it fails as expected. """ @@ -30,18 +30,6 @@ def test_wrong_journal_files(self): # TODO: make this file locked during runner workflow with chmod 600 self.assertRaises(librarian.CannotAccessFileError, Librarian, "locked-dir/locked_file.csv") - def test_valid_custom_moods(self): - self.assertTrue(Librarian("sheet-1-valid-data.csv", "../moods.json")) - - def test_wrong_custom_moods(self): - """ - Pass faulty moods and see if it fails as expected. - """ - self.assertRaises( - librarian.CannotAccessCustomMoodsError, - Librarian, "sheet-1-valid-data.csv", "_tests/output-results", "incomplete-moods.json" - ) - def test_valid_access_dates(self): """ All the following dates exist in the sheet-1-valid-data.csv and should be accessible by ``lib``. @@ -58,6 +46,12 @@ def test_valid_access_dates(self): self.assertTrue(lib.access_date("2022-10-27")) self.assertTrue(lib.access_date("2022-10-30")) + # Check if get-item method of accessing date groups also works + self.assertTrue(lib["2022-10-25"]) + self.assertTrue(lib["2022-10-26"]) + self.assertTrue(lib["2022-10-27"]) + self.assertTrue(lib["2022-10-30"]) + def test_wrong_access_dates(self): """ **None** of the following dates exist in the sheet-1-valid-data.csv and should **NOT** be accessible by ``lib``. @@ -73,6 +67,8 @@ def test_wrong_access_dates(self): self.assertTrue(lib.access_date("2022-10-20")) self.assertTrue(lib.access_date("2022-10-2")) self.assertTrue(lib.access_date("1999-10-22")) + # this dict method should also work + self.assertTrue(lib["2005-01-19"]) # But once I try to access the actual entries attached to those dates, they should be empty self.assertFalse(lib.access_date("2022-10-21").known_entries_from_this_day) @@ -81,21 +77,33 @@ def test_wrong_access_dates(self): self.assertFalse(lib.access_date("2022-10-22").known_entries_from_this_day) self.assertFalse(lib.access_date("1999-1-1").known_entries_from_this_day) + # check if Librarian correctly raises ValueError when trying to check invalid dates self.assertRaises(ValueError, lib.access_date, "ABC") self.assertRaises(ValueError, lib.access_date, "2022") self.assertRaises(ValueError, lib.access_date, "12:00 AM") self.assertRaises(ValueError, lib.access_date, "1795-12-05") # year range suspicious + # CUSTOM AND STANDARD MOOD SETS + # ----------------------------- def test_custom_moods_when_passed_correctly(self): + """Pass a valid JSON file and see if it knows it has access to custom moods now.""" self.assertTrue(Librarian( path_to_file="sheet-1-valid-data.csv", path_to_moods="../moods.json" - ).custom_moods) + ).current_mood_set.has_custom_moods) def test_custom_moods_when_not_passed(self): + """Pass no moods and see if it know it only has standard moods available.""" self.assertFalse(Librarian( path_to_file="sheet-1-valid-data.csv" - ).custom_moods) + ).current_mood_set.has_custom_moods) + + def test_custom_moods_with_invalid_jsons(self): + """Pass faulty moods and see if it fails as expected.""" + self.assertRaises( + librarian.CannotAccessCustomMoodsError, + Librarian, "sheet-1-valid-data.csv", "_tests/output-results", "empty_sheet.csv" + ) def test_custom_moods_when_json_invalid(self): self.assertRaises(librarian.CannotAccessCustomMoodsError, @@ -104,5 +112,11 @@ def test_custom_moods_when_json_invalid(self): Librarian, "sheet-1-valid-data.csv", "_tests/output-results/", "missing-file.json") self.assertRaises(librarian.CannotAccessCustomMoodsError, Librarian, "sheet-1-valid-data.csv", "_tests/output-results/", "locked-dir/locked_file.csv") - self.assertRaises(librarian.CannotAccessCustomMoodsError, - Librarian, "sheet-1-valid-data.csv", "_tests/output-results/", "incomplete-moods.json") + + def test_custom_moods_that_are_incomplete(self): + """ + Moodverse can deal with incomplete moods because the file merely expands its default knowledge + Therefore it will still be truthy. + """ + lib_to_test = Librarian("sheet-1-valid-data.csv", "_tests/output-results/", "incomplete-moods.json") + self.assertFalse(lib_to_test.current_mood_set.has_custom_moods) diff --git a/_tests/test_mood.py b/_tests/test_mood.py new file mode 100644 index 0000000..5456904 --- /dev/null +++ b/_tests/test_mood.py @@ -0,0 +1,217 @@ +import logging +from unittest import TestCase + +import utils +from entry.mood import Moodverse, MoodGroup, Mood, MoodNotFoundError +from typing import List + + +# noinspection SpellCheckingInspection +class TestMoodverse(TestCase): + def test_default_moodverse_no_customisation(self): + my_default_moodverse = Moodverse() + self.assertTrue(isinstance(my_default_moodverse["rad"], MoodGroup)) + self.assertTrue(isinstance(my_default_moodverse["good"], MoodGroup)) + self.assertTrue(isinstance(my_default_moodverse["neutral"], MoodGroup)) + self.assertTrue(isinstance(my_default_moodverse["bad"], MoodGroup)) + self.assertTrue(isinstance(my_default_moodverse["awful"], MoodGroup)) + + self.assertEqual(my_default_moodverse["rad"], ["rad"]) + self.assertEqual(my_default_moodverse["good"], ["good"]) + self.assertEqual(my_default_moodverse["neutral"], ["neutral"]) + self.assertEqual(my_default_moodverse["bad"], ["bad"]) + self.assertEqual(my_default_moodverse["awful"], ["awful"]) + + # this is just so I can test whether my __eq__ function overload correctly skips this + self.assertNotEqual(my_default_moodverse["awful"], MoodGroup("awful")) + + # These comparisons should be falsy because the array has more moods than the default mood set initialised + self.assertNotEqual(my_default_moodverse["rad"], ["rad", "amazing"]) + self.assertNotEqual(my_default_moodverse["awful"], ["awful", "miserable"]) + + # This comparison should be falsy because it does not contain the default mood set initialised + # └── known moods of 'neutral' group + # └── neutral <-- from standard + # And we're basically saying, "In neutral group there should only be a 'meh' mood" + self.assertNotEqual(my_default_moodverse["neutral"], ["meh"]) + + def test_loading_valid_moods_into_moodverse(self): + # These moods are self-sufficient, because even if standard mood set didn't exist, they satisfy all requirements + ok_moods_loaded_from_json = { + "rad": + ["rad", "amazing"], + "good": + ["good", "nice"], + "neutral": + ["neutral", "ok", "fine"], + "bad": + ["bad"], + "awful": + ["awful", "miserable"] + } + my_moodverse = Moodverse(ok_moods_loaded_from_json) + self.assertTrue(isinstance(my_moodverse["rad"], MoodGroup)) + self.assertTrue(isinstance(my_moodverse["good"], MoodGroup)) + self.assertTrue(isinstance(my_moodverse["neutral"], MoodGroup)) + self.assertTrue(isinstance(my_moodverse["bad"], MoodGroup)) + self.assertTrue(isinstance(my_moodverse["awful"], MoodGroup)) + + self.assertEqual(my_moodverse["rad"], ["rad", "amazing"]) + self.assertEqual(my_moodverse["good"], ["good", "nice"]) + self.assertEqual(my_moodverse["neutral"], ["neutral", "ok", "fine"]) + self.assertEqual(my_moodverse["bad"], ["bad"]) + self.assertEqual(my_moodverse["awful"], ["awful", "miserable"]) + + def test_loading_semi_valid_moods_into_moodverse(self): + # This mood set isn't self-sufficient, but still valid, because it has all the required "groups". + # Standard mood set is used here to cover for missing moods + # so, when: + semi_ok_moods_loaded_from_json = { + "rad": + ["amazing"], # lacks rad + "good": + ["nice"], # lacks good + "neutral": + ["ok", "fine"], # lacks neutral + "bad": + ["bad"], # OK + "awful": + ["miserable"] # lacks awful + } + + # then I can still use my moodverse, because standard set filled the blanks like so: + # . + # ├── known moods of 'rad' group + # │ └── rad <-- from standard + # │ └── amazing + # ├── known moods of 'great' group + # │ └── great <-- from standard + # │ └── nice + # ├── known moods of 'neutral' group + # │ └── neutral <-- from standard + # │ └── ok + # │ └── fine + # ├── known moods of 'bad' group + # │ └── bad + # └── known moods of 'awful' group + # └── awful <0 from standard + # └── miserable + + my_moodverse = Moodverse(semi_ok_moods_loaded_from_json) + self.assertTrue(isinstance(my_moodverse["rad"], MoodGroup)) + self.assertTrue(isinstance(my_moodverse["good"], MoodGroup)) + self.assertTrue(isinstance(my_moodverse["neutral"], MoodGroup)) + self.assertTrue(isinstance(my_moodverse["bad"], MoodGroup)) + self.assertTrue(isinstance(my_moodverse["awful"], MoodGroup)) + + # responses should be identical to the ones in previous test, because standard mood filled the blanks + self.assertEqual(my_moodverse["rad"], ["rad", "amazing"]) + self.assertEqual(my_moodverse["good"], ["good", "nice"]) + self.assertEqual(my_moodverse["neutral"], ["neutral", "ok", "fine"]) + self.assertEqual(my_moodverse["bad"], ["bad"]) + self.assertEqual(my_moodverse["awful"], ["awful", "miserable"]) + + # let's shuffle the order of values around to check if both lists are still equal + self.assertEqual(my_moodverse["rad"], ["amazing", "rad"]) + self.assertEqual(my_moodverse["good"], ["nice", "good"]) + self.assertEqual(my_moodverse["neutral"], ["ok", "neutral", "fine"]) + self.assertEqual(my_moodverse["bad"], ["bad"]) + self.assertEqual(my_moodverse["awful"], ["miserable", "awful"]) + + def test_get_mood(self): + # These moods are self-sufficient, because even if standard mood set didn't exist, they satisfy all requirements + ok_moods_loaded_from_json = { + "rad": + ["rad", "amazing", "awesome"], + "good": + ["good", "nice", "great"], + "neutral": + ["neutral", "ok", "fine", "whatever"], + "bad": + ["bad", "tired"], + "awful": + ["awful", "miserable"] + } + + query_moodverse = Moodverse(ok_moods_loaded_from_json) + self.assertTrue(query_moodverse.get_mood("fine")) + self.assertTrue(query_moodverse.get_mood("tired")) + self.assertTrue(query_moodverse.get_mood("miserable")) + self.assertTrue(query_moodverse.get_mood("amazing")) + self.assertTrue(query_moodverse.get_mood("bad")) + + self.assertFalse(query_moodverse.get_mood("horrible")) + self.assertFalse(query_moodverse.get_mood("disgusted")) + self.assertFalse(query_moodverse.get_mood("amazed")) + self.assertFalse(query_moodverse.get_mood("clumsy")) + + def test_loading_invalid_moods_into_moodverse(self): + bad_moods_loaded_from_json = { + "rad": + [""], # lacks rad + "good": + [""], # lacks good + "neutral": + [""], # lacks neutral + "bad": + [""], # lacks bad + "awful": + [""] # lacks awful + } + + with self.assertLogs(logging.getLogger(), level=logging.WARNING): + Moodverse(bad_moods_loaded_from_json) + + bad_moods_loaded_from_json = { + "rad": + ["rad"], + "good": + ["good"], + "neutral": + ["neutral"], + "bad": + ["bed"], # lacks bad + "awful": + [""] # lacks awful + } + + with self.assertLogs(logging.getLogger(), level=logging.WARNING): + Moodverse(bad_moods_loaded_from_json) + + bad_moods_loaded_from_json = { + "rad": + ["rad"], + "good": + ["good"], + "neutral": + ["neutral"] + # lacks bed + # lacks awful + } + + with self.assertLogs(logging.getLogger(), level=logging.WARNING): + Moodverse(bad_moods_loaded_from_json) + + +class TestMoodGroup(TestCase): + def test_create_group(self): + self.assertRaises(ValueError, MoodGroup, "") + self.assertTrue(MoodGroup("fancy")) + + def test_create_mood_in_this_group(self): + my_fancy_group = MoodGroup("fancy") + # Add two sample moods to this group + my_fancy_group.create_mood() + my_fancy_group.create_mood("out of this world") + # And one wrong one + with self.assertLogs(logging.getLogger(), logging.WARNING): + my_fancy_group.create_mood("") + + # Check if they exist + + # checks __eq__ overload - obj(group_name) == str(group_name) + self.assertEqual(my_fancy_group["fancy"], "fancy") + self.assertEqual(my_fancy_group["out of this world"], "out of this world") + + # also checks __getitem__ - obj(group_name)[group_name]: List[mood: str] + self.assertSetEqual(set(my_fancy_group.known_moods), {"out of this world", "fancy"}) diff --git a/dated_entries_group.py b/dated_entries_group.py index eda64f6..57606a5 100644 --- a/dated_entries_group.py +++ b/dated_entries_group.py @@ -5,13 +5,17 @@ Here's a quick breakdown of what is the specialisation of this file in the journaling process: all notes -> _NOTES WRITTEN ON A PARTICULAR DATE_ -> a particular note """ +from __future__ import annotations + import re import logging import dated_entry +import entry.mood import errors from typing import List from dated_entry import DatedEntry +from entry.mood import Moodverse import utils @@ -39,6 +43,17 @@ class Date: """ Day, month and year of a particular date. Validates the date string on instantiation. """ + _instances = {} # Class variable to store instances based on date + + def __new__(cls, string: str): + # Check if an instance for the given date already exists + if string in cls._instances: + return cls._instances[string] + else: + # If not, create a new instance + instance = super(Date, cls).__new__(cls) + cls._instances[string] = instance + return instance def __init__(self, string: str): """ @@ -70,6 +85,27 @@ def __str__(self) -> str: """ return '-'.join([self.__year, self.__month, self.__day]) + def __eq__(self, other: 'Date') -> bool: + """Used only for comparing two :class:`Date` objects - itself and another one.""" + if isinstance(other, Date): + return all((other.year == self.year, + other.month == self.month, + other.day == self.day)) + else: + super().__eq__(other) + + @property + def year(self): + return self.__year + + @property + def month(self): + return self.__month + + @property + def day(self): + return self.__day + class DatedEntriesGroup(utils.Core): """ @@ -80,16 +116,29 @@ class DatedEntriesGroup(utils.Core): However, the scribe knows only his papers. The papers contain all entries written that particular date. Truthy if it knows at least one :class:`DatedEntry` made on this :class:`Date`. - :raises InvalidDateError: if the date string is deemed invalid by :class:`Date` """ + _instances = {} + + def __new__(cls, date: str, current_mood_set: Moodverse): + # Check if an instance for the given date already exists + if date in cls._instances: + return cls._instances[date] + else: + # If not, create a new instance + instance = super(DatedEntriesGroup, cls).__new__(cls) + cls._instances[date] = instance + return instance - def __init__(self, date): + def __init__(self, date, current_mood_set: Moodverse): + """ + :raises InvalidDateError: if the date string is deemed invalid by :class:`Date` + :param date: The date for all child entries within. + """ self.__logger = logging.getLogger(self.__class__.__name__) # Try parsing the date and assigning it as your identification (uid) try: super().__init__(Date(date)) - # Date is no good? except InvalidDateError: msg = ErrorMsg.print(ErrorMsg.WRONG_VALUE, date, "YYYY-MM-DD") @@ -98,18 +147,19 @@ def __init__(self, date): # All good - initialise else: - self.__known_entries_for_this_date = {} + self.__known_entries_for_this_date: dict[str, DatedEntry] = {} + self.__known_moods: Moodverse = current_mood_set def create_dated_entry_from_row(self, line: dict[str], - known_moods: dict[str, List[str]] = None) -> dated_entry.DatedEntry: + override_mood_set: Moodverse = Moodverse()) -> dated_entry.DatedEntry: """ :func:`access_dated_entry` of :class:`DatedEntry` object with the specified parameters. :raises TriedCreatingDuplicateDatedEntryError: if it would result in making a duplicate :class:`DatedEntry` :raises IncompleteDataRow: if ``line`` does not have ``time`` and ``mood`` keys at the very least :raises ValueError: re-raises ValueError from :class:`DatedEntry` :param line: a dictionary of strings. Required keys: mood, activities, note_title & note. - :param known_moods: each key of the dict should have a set of strings containing moods. + :param override_mood_set: each key of the dict should have a set of strings containing moods. """ # TODO: test case this # Try accessing the minimum required keys @@ -125,19 +175,19 @@ def create_dated_entry_from_row(self, # Instantiate the entry try: - entry = dated_entry.DatedEntry( + this_entry = dated_entry.DatedEntry( line["time"], line["mood"], - known_moods, activities=line["activities"], title=line["note_title"], - note=line["note"] + note=line["note"], + override_mood_set=self.__known_moods ) except ValueError: raise ValueError else: - self.__known_entries_for_this_date[str(entry.uid)] = entry - return entry + self.__known_entries_for_this_date[str(this_entry.uid)] = this_entry + return this_entry def access_dated_entry(self, time: str) -> DatedEntry: """ diff --git a/dated_entry.py b/dated_entry.py index 3fdce94..f14bd34 100644 --- a/dated_entry.py +++ b/dated_entry.py @@ -5,11 +5,13 @@ Here's a quick breakdown of what is the specialisation of this file in the journaling process: all notes -> notes written on a particular date -> _A PARTICULAR NOTE_ """ +from __future__ import annotations + import logging import re from typing import Match -from typing import List +from entry.mood import Moodverse from config import options import errors import utils @@ -108,8 +110,11 @@ def slice_quotes(string: str) -> str: class ErrorMsg(errors.ErrorMsgBase): - INVALID_MOOD = "Mood {} is missing from a list of known moods. Colouring won't work for this one." - WRONG_PARENT = "Object of class {} is trying to instantiate {} as child. This will end badly." + INVALID_MOOD = "Mood {} is missing from a list of known moods. Not critical, but colouring won't work on the entry." + WRONG_TIME = "Received {}, expected valid time. Cannot create this entry without a valid time." + WRONG_ACTIVITIES = "Expected a non-empty list of activities. In that case just omit this argument in function call." + WRONG_TITLE = "Expected a non-empty title. Omit this argument in function call rather than pass a falsy string." + WRONG_NOTE = "Expected a non-empty note. Omit this argument in function call rather than pass a falsy string." class Time: @@ -167,39 +172,44 @@ class DatedEntry(utils.Core): def __init__(self, time: str, mood: str, - known_moods: dict[str, List[str]], activities: str = None, title: str = None, - note: str = None): + note: str = None, + override_mood_set: Moodverse = Moodverse()): + """ + :param time: Time at which this note was created + :param mood: Mood felt during writing this note + :param activities: (opt.) Activities carried out around or at the time of writing the note + :param title: (opt.) Title of the note + :param note: (opt.) The contents of the journal note itself + :param override_mood_set: Set if you want to use custom :class:`Moodverse` for mood handling + """ # TODO: have to test the whole instantiation function again after refactoring self.__logger = logging.getLogger(self.__class__.__name__) # Processing required properties # --- - # Time + # TIME + # --- try: super().__init__(Time(time)) except IsNotTimeError: - raise ValueError("Cannot create object without valid Time attribute") + errors.ErrorMsgBase.print(ErrorMsg.WRONG_TIME, time) + raise ValueError - # Mood - if len(mood) == 0 or mood is None: - raise ValueError("Cannot create object without valid Mood attribute") - else: - if known_moods is True: - mood_is_in_dictionary = False - for i, (_, this_group) in enumerate(known_moods.items()): - if mood in this_group: - mood_is_in_dictionary = True - break - if not mood_is_in_dictionary: - self.__logger.warning(ErrorMsg.print(ErrorMsg.INVALID_MOOD, mood)) - # Assign it anyway. Warning is enough. - self.__mood = mood + # --- + # MOOD + # --- + # Check if the mood is valid - i.e. it does exist in the currently used Moodverse + if not override_mood_set.get_mood(mood): + errors.ErrorMsgBase.print(ErrorMsg.INVALID_MOOD, mood) + # Warning is enough, it just disables colouring so not big of a deal + self.__mood = mood # Processing other, optional properties # --- # Process activities + # --- self.__activities = [] array = slice_quotes(activities).split(options.csv_delimiter) if len(array) > 0: @@ -208,16 +218,24 @@ def __init__(self, activity, options.tag_activities )) - + else: + errors.ErrorMsgBase.print(ErrorMsg.WRONG_ACTIVITIES) + # --- # Process title + # --- self.__title = None - if title is not None and len(title) > 0: + if title is True and len(title) > 0: self.__title = slice_quotes(title) - + else: + errors.ErrorMsgBase.print(ErrorMsg.WRONG_TITLE) + # --- # Process note + # --- self.__note = None - if note is not None and len(note) > 0: + if note is True and len(note) > 0: self.__note = slice_quotes(note) + else: + errors.ErrorMsgBase.print(ErrorMsg.WRONG_NOTE) @property def mood(self): diff --git a/entry/mood.py b/entry/mood.py new file mode 100644 index 0000000..680285f --- /dev/null +++ b/entry/mood.py @@ -0,0 +1,250 @@ +from __future__ import annotations + +import logging +from typing import List, Optional + +import errors +import utils + + +class ErrorMsg(errors.ErrorMsgBase): + MOOD_GROUP_NOT_FOUND = "Expected to find {} mood group, but the key is missing from the dictionary." + STANDARD_MOODS_USED = "Problem parsing custom moods file. Standard mood set - i.e. {} - will be used." + SKIPPED_INVALID_MOOD = "Skipping \'{}\' as it is not a valid mood." + + +class EmptyValue(utils.CustomException): + """Attribute cannot be set to empty.""" + + +class MoodNotFoundError(utils.CustomException): + """The mood could not be found neither in the standard mood set nor the custom one.""" + + +# noinspection SpellCheckingInspection +class Moodverse: + """ + Moodverse is the single source of truth regarding moods. It always knows the bare-minimum set of moods. + Its knowledge can be expanded with custom mood sets. + """ + def __init__(self, moods_to_process: dict[str, List[str]] = None): + """ + If you want to expand the standard mood set with your custom one, make sure to pass a valid mood set file. + Even one error in the dict causes Moodverse to reject it, and it will then proceed to use just the standard set. + :param moods_to_process: Lists of moods grouped into five dictionary keys: rad, great, neutral, bad & awful + """ + self.__logger = logging.getLogger(self.__class__.__name__) + # DEFAULT PART OF INIT + # -------------------- + # Build a minimal-viable mood set with these five mood groups + # . + # ├── known moods of 'rad' group + # │ └── rad + # ├── known moods of 'great' group + # │ └── great + # ├── known moods of 'neutral' group + # │ └── neutral + # ├── known moods of 'bad' group + # │ └── bad + # └── known moods of 'awful' group + # └── awful + + self.__mood_set: dict[str, MoodGroup] = {} + self.__has_custom_moods = False + + for group_name in ["rad", "good", "neutral", "bad", "awful"]: + # Add the new group to the dict + self.__mood_set[group_name] = MoodGroup(group_name) + # Ask it to create its main mood (e.g. 'rad' for 'rad'-group) + self.__mood_set[group_name].create_mood() + + # We can stop here and be content with our "default" / "standard" mood-set if user did not pass a custom one + # CUSTOM PART OF INIT + # -------------------- + if moods_to_process is not None: + try: + self.__expand_moodset_with_customs(moods_to_process) + except MoodNotFoundError: + msg = ErrorMsg.print(ErrorMsg.STANDARD_MOODS_USED, ', '.join(self.__mood_set.keys())) + self.__logger.warning(msg) + else: + self.__has_custom_moods = True + + def __expand_moodset_with_customs(self, moods_to_process: dict[str, List[str]]) -> None: + """ + Expands the knowledge of this specific :class:`Moodverse` with new custom moods passed by user. + :param moods_to_process: Lists of str moods grouped into five dictionary keys: rad, great, neutral, bad & awful + :raises MoodNotFoundError: if at least one mood was found to be invalid, no new moods will be saved + """ + # Then expand the minimal-viable mood set from Moodverse __init__ + # . + # ├── known moods of 'rad' group + # │ └── rad + # │ └── (add more...) + # ├── known moods of 'great' group + # │ └── great + # │ └── (add more...) + # ├── known moods of 'neutral' group + # │ └── neutral + # │ └── (add more...) + # ├── known moods of 'bad' group + # │ └── bad + # │ └── (add more...) + # └── known moods of 'awful' group + # └── awful + # └── (add more...) + + # Use keys from standard_mood_set to navigate through the passed dictionary and add their unique moods over + # The side effect is that keys in the passed dictionary which do not appear in standard mood set are skipped + + # e.g. I'm expecting a "rad" group to be in the dict + for expected_mood_group in self.__mood_set.keys(): + expected_mood_group: str + try: + # Leap of faith - there must be a "rad" group, otherwise there's no point in continuing + mood_group_to_transfer = moods_to_process[expected_mood_group] + except KeyError: + msg = ErrorMsg.print(ErrorMsg.MOOD_GROUP_NOT_FOUND, expected_mood_group) + self.__logger.warning(msg) + raise MoodNotFoundError(msg) + # Go through each mood in this mood group - e.g. rad - and transfer them in the form of Mood objects + else: + for mood in mood_group_to_transfer: + self.__mood_set[expected_mood_group].create_mood(mood) + + def get_mood(self, value_to_check: str) -> Optional['Mood']: + """ + Checks if the mood exists in the currently used mood set. None if it does not. + :param value_to_check: string with the name of the mood to find + :return: reference to the :class:`Mood` object found in the currently used mood set or None + """ + return next((mood_group.known_moods[value_to_check] for mood_group in self.__mood_set.values() if + value_to_check in mood_group.known_moods), None) + + def __getitem__(self, item: str): + return self.__mood_set[item] + + @property + def known_mood_groups(self): + return self.__mood_set + + @property + def has_custom_moods(self): + return self.__has_custom_moods + + +class AbstractMood: + """ + Provides shared methods for :class:`MoodGroup` and :class:`Mood`. + """ + def __init__(self, value): + if not value or isinstance(value, str) is False: + raise ValueError + else: + self.__name = value + + @property + def name(self) -> str: + return self.__name + + def __str__(self) -> str: + return self.__name + + def __eq__(self, other) -> bool: + if isinstance(other, str): + return str(self) == other + else: + return super().__eq__(other) # of object object + + +class MoodGroup(AbstractMood): + """ + Mood group is an abstract way of thinking about moods, based on categorising them into different groups (or levels). + + **For example**: + Feeling 'awesome' and 'amazing' all belong to the same group. + Therefore, if we categorise moods to different categories, we create mood groups. + They could, for example, range from 5 (best) to 1 (worst). + + Daylio uses 'rad', 'great', 'neutral', 'bad' & 'awful'. + """ + def __init__(self, name_of_the_mood_group: str): + """ + Create a :class:`MoodGroup` object with the specified name. + :param name_of_the_mood_group: Name of the mood group you want to create. + """ + self.__logger = logging.getLogger(self.__class__.__name__) + self.__known_moods: dict[str, Mood] = {} + + super().__init__(name_of_the_mood_group) + + def create_mood(self, name: str = None) -> 'Mood': + """ + Create the specified mood and append its reference to the known moods in this group. + :param name: Name of the mood. If none provided, use the mood group name as its name (e.g. rad group -> rad). + :return: reference to the newly created :class:`Mood` object + """ + # e.g. if no argument is passed, MoodGroup "rad" will create a Mood "rad" + # it's just a shorthand so that you don't have to write MoodGroup("rad").create_mood("rad") + # └── known moods of 'rad' group + # └── rad + final_name = self.name if name is None else name + try: + ref = Mood(final_name) + except (EmptyValue, ValueError): + self.__logger.warning(ErrorMsg.print(ErrorMsg.SKIPPED_INVALID_MOOD, final_name)) + # all went ok + else: + self.__known_moods[final_name] = ref + return ref + + # TODO: possibly could do funky stuff with multiple inheritance - this method could come from Moodverse + @property + def known_moods(self) -> dict[str, 'Mood']: + return self.__known_moods + + # TODO: possibly could do funky stuff with multiple inheritance - this method could come from Moodverse + # I cannot type hint that this method returns a Mood object because of evaluation of annotations problem + # It is discussed here: https://peps.python.org/pep-0563/ + # I could resolve it with ``from __future__ import annotations``, however it requires more recent Python versions + # Right now I go around the problem by providing the class name in apostrophes + def __getitem__(self, item: str) -> 'Mood': + """ + Access the mood in this mood group. + :raises KeyError: there is no such mood in this mood group. + :param item: Which mood would you like to access? + :return: reference to the mood object + """ + if item in self.__known_moods: + return self.__known_moods[item] + else: + raise KeyError + + def __eq__(self, other: List[str]) -> bool: + """ + Does this mood group contain exactly the same moods as in the passed list. + If other types of objects are passed rather than List[str], call the higher-ups. + :param other: list of moods in string format to compare the mood group to + :return: bool + """ + # Used to compare instance of this class to ["mood", "mood", "mood"] and check if they contain the same moods + if isinstance(other, list) and all(isinstance(item, str) for item in other): + # I'm not sure why, but set() instead of pure array makes sure that the order is irrelevant + # therefore ["nice", "good"] == ["good", "nice"] is Truthy, as expected + return set([str(obj) for obj in self.known_moods]) == set(other) + else: + # Call the superclass' __eq__ for any other comparison + return super().__eq__(other) + + +class Mood(AbstractMood): + """ + A specific mood that belongs to a specific mood group. + + *For example*: + + - rad is a mood. It belongs to rad group. + - awesome is a mood. It also belongs to the rad group. + - hungry is a mood. It belongs either to neutral, bad or awful mood groups, depending on user preferences I guess""" + def __init__(self, value: str): + super().__init__(value) diff --git a/errors.py b/errors.py index a3107c5..9650aeb 100644 --- a/errors.py +++ b/errors.py @@ -1,5 +1,6 @@ import logging import sys +from typing import Optional class ColorHandler(logging.StreamHandler): @@ -34,6 +35,7 @@ def emit(self, record): console_log_handler = ColorHandler(sys.stdout) console_log_handler.setLevel(logging.DEBUG) +# noinspection SpellCheckingInspection formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s (%(filename)s:%(lineno)d)") console_log_handler.setFormatter(formatter) @@ -49,12 +51,12 @@ class ErrorMsgBase: """ # some common errors have been raised in scope into base class instead of child classes OBJECT_FOUND = "{}-class object found." - OBJECT_NOT_FOUND = "{} object not found." + OBJECT_NOT_FOUND = "{}-class object not found." FAULTY_OBJECT = "Called a {}-class object method but the object has been incorrectly instantiated." WRONG_VALUE = "Received {}, expected {}." @staticmethod - def print(message: str, *args: str) -> str | None: + def print(message: str, *args: str) -> Optional[str]: """ Insert the args into an error message. If the error message expects n variables, provide n arguments. Returns a string with the already filled out message. diff --git a/librarian.py b/librarian.py index b2e45db..ebfce56 100644 --- a/librarian.py +++ b/librarian.py @@ -6,16 +6,20 @@ Librarian knows their identity and can call upon them when needed to recite their contents back to the Librarian. Here's a quick breakdown of what is the specialisation of this file in the journaling process: -└── ALL NOTES - └── notes written on a particular date - └── a particular note + +``└── ALL NOTES`` + ``└── all notes written on a particular date`` + ``└── a particular note`` """ +from __future__ import annotations + import csv import json import logging -from typing import List +import dated_entries_group from config import options +from entry.mood import Moodverse import errors import utils from dated_entries_group import DatedEntriesGroup @@ -56,18 +60,7 @@ class ErrorMsg(errors.ErrorMsgBase): NOT_A_FILE = "{} is not a file." CSV_ALL_FIELDS_PRESENT = "All expected columns are present in the CSV file columns." CSV_FIELDS_MISSING = "The following expected columns are missing: {}" - COUNT_ROWS = "Found {} rows of data in {}." - - -# Here's a quick reference what a "minimal viable" JSON there needs to be if you want to have custom mood-sets. -# If you do not pass a custom one, the application uses the following structure as a fallback mood-set. -standard_mood_set = { - "rad": ["rad"], - "good": ["good"], - "neutral": ["neutral"], - "bad": ["bad"], - "awful": ["awful"] -} + COUNT_ROWS = "{} rows of data found in {}. Of that {} were processed correctly." class MissingValuesInRowError(utils.CustomException): @@ -124,39 +117,31 @@ def __init__(self, :raises CannotAccessFileError: if any problems occur during accessing or decoding the CSV file. :param path_to_moods: The path for a custom mood set file. """ - - self.__known_moods = standard_mood_set - self.__known_dates = {} self.__logger = logging.getLogger(self.__class__.__name__) - self.__destination = path_to_output + self.__known_dates = {} # Let's start processing the file # --- - # 1. Parse the path_to_moods JSON to see if a custom mood-set has to be used - if path_to_moods is not None: - try: - self.__set_custom_moods(path_to_moods) - except (CannotAccessFileError, InvalidDataInFileError): - raise CannotAccessCustomMoodsError + # 1. Parse the path_to_moods JSON for a custom mood set, if possible, or otherwise use standard mood set + # + # P.S Why am I starting first with moods? Because process_file first checks if it has moods installed. + try: + self.__mood_set = self.__create_mood_set(path_to_moods) + except CannotAccessFileError: + raise CannotAccessCustomMoodsError # 2. Access the CSV file and get all the rows with content # then pass the data to specialised data objects that can handle them in a structured way + # TODO: Deal with files that are valid but at the end of parsing have zero lines successfully parsed try: self.__process_file(path_to_file) except (CannotAccessFileError, InvalidDataInFileError): raise CannotAccessJournalError - @property - def custom_moods(self) -> dict[str, List[str]] | None: - """ - :returns: dictionary of rad, good, neutral, bad and awful moods that this Librarian instance knows about - """ - if self.__known_moods != standard_mood_set: - return self.__known_moods - else: - return None + # Ok, if no exceptions were raised so far, the file is good, let's go through the rest of the attributes + self.__destination = path_to_output - def __set_custom_moods(self, json_file: str): + def __create_mood_set(self, json_file: str = None) -> 'Moodverse': """ Overwrite the standard mood-set with a custom one. Mood-sets are used in colour-coding each dated entry. @@ -164,50 +149,43 @@ def __set_custom_moods(self, json_file: str): Should have five keys: ``rad``, ``good``, ``neutral``, ``bad`` and ``awful``. Each of those keys should hold an array of any number of strings indicating various moods. **Example**: ``[{"good": ["good"]},...]`` - :raises CannotAccessFileError: if any problems occur during accessing or decoding the JSON. - :returns: success or failure to set + :returns: reference to the :class:`Moodverse` object """ - exp_path = utils.expand_path(json_file) - try: - with open(exp_path, encoding="UTF-8") as file: - tmp_mood_set = json.load(file) - except FileNotFoundError: - msg = ErrorMsg.print(ErrorMsg.FILE_MISSING, exp_path) - self.__logger.warning(msg) - raise CannotAccessFileError(msg) - except PermissionError: - msg = ErrorMsg.print(ErrorMsg.PERMISSION_ERROR, exp_path) - self.__logger.warning(msg) - raise CannotAccessFileError(msg) - except json.JSONDecodeError: - msg = ErrorMsg.print(ErrorMsg.DECODE_ERROR, exp_path) - self.__logger.warning(msg) - raise CannotAccessFileError(msg) - - # Try accessing each mood key to watch for KeyError if missing - for mood_key in self.__known_moods.keys(): + if json_file: + exp_path = utils.expand_path(json_file) try: - tmp_mood_set[mood_key] - except KeyError: - msg = ErrorMsg.print(ErrorMsg.FILE_INCOMPLETE, exp_path) + with open(exp_path, encoding="UTF-8") as file: + custom_mood_set_from_file = json.load(file) + except FileNotFoundError: + msg = ErrorMsg.print(ErrorMsg.FILE_MISSING, exp_path) self.__logger.warning(msg) - raise InvalidDataInFileError(msg) - else: - continue + raise CannotAccessFileError(msg) + except PermissionError: + msg = ErrorMsg.print(ErrorMsg.PERMISSION_ERROR, exp_path) + self.__logger.warning(msg) + raise CannotAccessFileError(msg) + except json.JSONDecodeError: + msg = ErrorMsg.print(ErrorMsg.DECODE_ERROR, exp_path) + self.__logger.warning(msg) + raise CannotAccessFileError(msg) + else: + custom_mood_set_from_file = None - # At this point, we know each mood key is present so the dictionary is valid - self.__known_moods = tmp_mood_set + # the command works with or without the argument + # - Case 1: no argument = default mood-set + # - Case 2: argument passed, but invalid = default mood-set + # - Case 3: argument passed, it is valid = default mood-set expanded by the custom mood-set + return Moodverse(custom_mood_set_from_file) def __process_file(self, filepath: str) -> bool: """ Validates CSV file and processes it into iterable rows. - :param filepath: path to CSV to be read :raises CannotAccessFileError: if any problems occur during accessing the CSV file. :raises InvalidDataInFileError: if any problems occur during parsing the CSV file. :returns: True if parsed > 0, False otherwise """ - if not self.custom_moods: + if not self.__mood_set.has_custom_moods: self.__logger.info(ErrorMsg.print(ErrorMsg.STANDARD_MOODS_USED)) # Let's determine if the file can be opened @@ -237,7 +215,6 @@ def __process_file(self, filepath: str) -> bool: # Is it a valid CSV? try: # strict parameter throws csv.Error if parsing fails - # if the parsing fails, exit immediately raw_lines = csv.DictReader(file, delimiter=',', quotechar='"', strict=True) except csv.Error: msg = ErrorMsg.print(ErrorMsg.DECODE_ERROR, filepath) @@ -245,7 +222,7 @@ def __process_file(self, filepath: str) -> bool: raise InvalidDataInFileError(msg) # Does it have all the fields? Push any missing field into an array for later reference - # Even if only one column from the list below is missing in the CSV, exit immediately + # Even if only one column from the list below is missing in the CSV, it's a problem while parsing later expected_fields = [ "full_date", "date", @@ -293,14 +270,20 @@ def __process_file(self, filepath: str) -> bool: # Processing # --- lines_parsed = 0 + lines_parsed_successfully = 0 for line in raw_lines: line: dict[str] try: lines_parsed += self.__process_line(line) except MissingValuesInRowError: pass + else: + lines_parsed_successfully += 1 - self.__logger.info(ErrorMsg.print(ErrorMsg.COUNT_ROWS, str(lines_parsed), filepath)) + # Report back how many lines were parsed successfully out of all tried + self.__logger.info(ErrorMsg.print( + ErrorMsg.COUNT_ROWS, str(lines_parsed), filepath, str(lines_parsed_successfully)) + ) # If at least one line has been parsed, the following return resolves to True return bool(lines_parsed) @@ -324,8 +307,11 @@ def __process_line(self, line: dict[str]) -> bool: else: # Let DatedEntriesGroup handle the rest and increment the counter (True == 1) try: - self.access_date(line["full_date"]).create_dated_entry_from_row(line, known_moods=self.__known_moods) - except: + self.access_date(line["full_date"]).create_dated_entry_from_row(line, self.__mood_set) + except (dated_entries_group.TriedCreatingDuplicateDatedEntryError, + dated_entries_group.IncompleteDataRow, + dated_entries_group.InvalidDateError, + ValueError): return False else: return True @@ -333,17 +319,34 @@ def __process_line(self, line: dict[str]) -> bool: def access_date(self, target_date: str) -> DatedEntriesGroup: """ Accesses an already existing or creates a new :class:`DatedEntriesGroup` for the specified ``target_date``. + :raises ValueError: if ``target_date`` is an invalid Date as indicated by :class:`Date` object :param target_date: the date for which a unique :class:`DatedEntriesGroup` object should be created or accessed. :return: reference to :class:`DatedEntriesGroup` object """ - date_obj = DatedEntriesGroup(target_date) + try: + this_date_group = dated_entries_group.DatedEntriesGroup(target_date, self.__mood_set) + except dated_entries_group.InvalidDateError: + raise ValueError # have you already filed this date? - # TODO: maybe I should use a Date object instead of a string for comparison in the dict? - if target_date in self.__known_dates: + if this_date_group.date in self.__known_dates: + # yes self.__logger.debug(ErrorMsg.print(ErrorMsg.OBJECT_FOUND, target_date)) else: + # no, add it to my dict self.__logger.debug(ErrorMsg.print(ErrorMsg.OBJECT_NOT_FOUND, target_date)) - self.__known_dates[date_obj.uid] = date_obj + self.__known_dates[this_date_group.date] = this_date_group + + # in any case + return this_date_group - return date_obj + # Use a dunder overload of getitem to access groups in either way + # 1. my_librarian["2022-10-10"] + # 2. my_librarian.access_date("2022-10-10") + def __getitem__(self, item: str) -> DatedEntriesGroup: + ref = self.access_date(item) + return ref + + @property + def current_mood_set(self): + return self.__mood_set From 0ff7b89ec3c3782ec5b0ae340ec6f4f78a91b6df Mon Sep 17 00:00:00 2001 From: DeutscheGabanna <thetobruk@duck.com> Date: Tue, 16 Jan 2024 17:08:56 +0100 Subject: [PATCH 13/40] deleted hack.py - not needed --- _tests/hack.py | 10 ---------- 1 file changed, 10 deletions(-) delete mode 100644 _tests/hack.py diff --git a/_tests/hack.py b/_tests/hack.py deleted file mode 100644 index 59d9c2f..0000000 --- a/_tests/hack.py +++ /dev/null @@ -1,10 +0,0 @@ -import sys - -print(sys.argv) - -# very dirty way to work around PyCharm's inability to pass arguments to test runner -# this problem only occurs if you run scripts in Python tests conf -sys.argv.append("_tests/sheet-1-valid.data.csv") -sys.argv.append("_tests/output-results/") - -print(sys.argv) \ No newline at end of file From 7b3b6d1c5d13b5f24494fddd5fa19686bf7d970e Mon Sep 17 00:00:00 2001 From: DeutscheGabanna <thetobruk@duck.com> Date: Wed, 17 Jan 2024 19:03:36 +0100 Subject: [PATCH 14/40] get_console() -> arg_console Pythonic getter --- _tests/test_config.py | 16 ++++++++-------- config.py | 7 ++++--- dated_entry.py | 2 +- librarian.py | 2 +- 4 files changed, 14 insertions(+), 13 deletions(-) diff --git a/_tests/test_config.py b/_tests/test_config.py index 947d43a..bd5f210 100644 --- a/_tests/test_config.py +++ b/_tests/test_config.py @@ -11,7 +11,7 @@ def test_spoofed_keyword_option_without_equality_sign(self): """ # Setup options_to_check = config.SettingsManager() - options_to_check.get_console().add_argument( + options_to_check.arg_console.add_argument( "--force", choices=["accept", "refuse"], default=None @@ -38,7 +38,7 @@ def test_spoofed_keyword_option_with_equality_sign(self): """ # Setup options_to_check = config.SettingsManager() - options_to_check.get_console().add_argument( + options_to_check.arg_console.add_argument( "--force", choices=["accept", "refuse"], default=None @@ -65,11 +65,11 @@ def test_spoofed_keyword_option_with_equality_sign(self): def test_check_if_required_arguments_passed(self): # Setup options_to_check = config.SettingsManager() - options_to_check.get_console().add_argument( + options_to_check.arg_console.add_argument( "filepath", type=str ) - options_to_check.get_console().add_argument( + options_to_check.arg_console.add_argument( "--optional_arg", type=str ) @@ -87,7 +87,7 @@ def test_check_if_required_arguments_passed(self): def test_expected_failure_empty_argument_array(self): # Setup options_to_check = config.SettingsManager() - options_to_check.get_console().add_argument( + options_to_check.arg_console.add_argument( "filepath", type=str ) @@ -107,16 +107,16 @@ def test_if_settings_manager_overwrites_its_properties_from_console(self): options_to_check = config.SettingsManager() options_to_check.filepath = "this is the default value" - options_to_check.get_console().add_argument( + options_to_check.arg_console.add_argument( "filepath", type=str ) - options_to_check.get_console().add_argument( + options_to_check.arg_console.add_argument( "foo", type=str, help="this type of argument does not appear in the SettingsManager list of attributes at setup" ) - options_to_check.get_console().add_argument( + options_to_check.arg_console.add_argument( "bar", type=str, help="this type of argument does not appear in the SettingsManager list of attributes at setup" diff --git a/config.py b/config.py index 5a6875a..b7e4449 100755 --- a/config.py +++ b/config.py @@ -59,14 +59,15 @@ def __init__(self): self.colour = True # TODO: User should be able to set verbosity level in logging - def get_console(self): + @property + def arg_console(self) -> argparse.ArgumentParser: """ Retrieves the :class:`argparse.ArgumentParser` object from :class:`SettingsManager` so you can modify it. :return: :class:`argparse.ArgumentParser` """ return self.__console_arguments - def parse_console(self, args: List[Any]): + def parse_console(self, args: List[Any]) -> None: """ Configures SettingsManager by accessing the console and retrieving the arguments used to run the script. :param args: either console arguments from sys.argv or spoofed ones @@ -90,7 +91,7 @@ def parse_console(self, args: List[Any]): options = SettingsManager() # Add some common options -options.get_console().add_argument( +options.arg_console.add_argument( '--version', action='version', version='%(prog)s 3.0' diff --git a/dated_entry.py b/dated_entry.py index f14bd34..f6890b7 100644 --- a/dated_entry.py +++ b/dated_entry.py @@ -17,7 +17,7 @@ import utils # Adding DatedEntry-specific options in global_settings -dated_entry_settings = options.get_console().add_argument_group( +dated_entry_settings = options.arg_console.add_argument_group( "Dated Entries", "Handles how entries should be formatted" ) diff --git a/librarian.py b/librarian.py index ebfce56..fd79799 100644 --- a/librarian.py +++ b/librarian.py @@ -25,7 +25,7 @@ from dated_entries_group import DatedEntriesGroup # Adding Librarian-specific options in global_settings -librarian_settings = options.get_console().add_argument_group( +librarian_settings = options.arg_console.add_argument_group( "Librarian", "Handles main options" ) From 0238b1bfce01225e407fcf1891166f02052bfa87 Mon Sep 17 00:00:00 2001 From: DeutscheGabanna <thetobruk@duck.com> Date: Wed, 17 Jan 2024 19:04:00 +0100 Subject: [PATCH 15/40] remove PyCharm project files from git --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 606714d..493ae7a 100755 --- a/.gitignore +++ b/.gitignore @@ -2,3 +2,4 @@ __pycache__/ _tests/debug.log /_tests/output-results/ /debug.log +.idea/ \ No newline at end of file From ce23e5d47bf4e569e1360c4aa25b86eb64b39e65 Mon Sep 17 00:00:00 2001 From: DeutscheGabanna <thetobruk@duck.com> Date: Wed, 17 Jan 2024 19:43:39 +0100 Subject: [PATCH 16/40] fixed all test scenarios - returns OK --- .gitignore | 3 ++- _tests/test_dated_entry.py | 7 ++----- _tests/test_librarian.py | 5 +++-- _tests/test_utils.py | 9 +++++++-- dated_entries_group.py | 16 ++++++++++------ dated_entry.py | 2 ++ librarian.py | 2 +- utils.py | 4 +++- 8 files changed, 30 insertions(+), 18 deletions(-) diff --git a/.gitignore b/.gitignore index 493ae7a..7d24960 100755 --- a/.gitignore +++ b/.gitignore @@ -2,4 +2,5 @@ __pycache__/ _tests/debug.log /_tests/output-results/ /debug.log -.idea/ \ No newline at end of file +.idea/ +.vscode/ \ No newline at end of file diff --git a/_tests/test_dated_entry.py b/_tests/test_dated_entry.py index 710053a..90a6f22 100644 --- a/_tests/test_dated_entry.py +++ b/_tests/test_dated_entry.py @@ -40,10 +40,7 @@ def test_bare_minimum_dated_entries(self): # When bare_minimum_dated_entry = DatedEntry( time="1:49 AM", - mood="vaguely ok", - override_mood_set={ - "neutral": ["vaguely ok"] - } + mood="vaguely ok" ) # Then @@ -54,4 +51,4 @@ def test_bare_minimum_dated_entries(self): self.assertTrue(bare_minimum_dated_entry.activities, []) def test_insufficient_dated_entries(self): - self.assertRaises(ValueError, DatedEntry, "2:00", mood="", known_moods={"neutral": ["vaguely ok"]}) + self.assertRaises(ValueError, DatedEntry, time="2:00", mood="") diff --git a/_tests/test_librarian.py b/_tests/test_librarian.py index 6f9d1c9..9e74fc8 100644 --- a/_tests/test_librarian.py +++ b/_tests/test_librarian.py @@ -115,8 +115,9 @@ def test_custom_moods_when_json_invalid(self): def test_custom_moods_that_are_incomplete(self): """ - Moodverse can deal with incomplete moods because the file merely expands its default knowledge - Therefore it will still be truthy. + Moodverse can deal with incomplete moods because the file merely expands its default knowledge. + However, it can only expand it (and be truthy) if the dict with moods has all required groups. + Therefore, since ``incomplete-moods`` lacks the ``good`` group, the assertion will evaluate to False. """ lib_to_test = Librarian("sheet-1-valid-data.csv", "_tests/output-results/", "incomplete-moods.json") self.assertFalse(lib_to_test.current_mood_set.has_custom_moods) diff --git a/_tests/test_utils.py b/_tests/test_utils.py index aa4446f..0ed0789 100644 --- a/_tests/test_utils.py +++ b/_tests/test_utils.py @@ -7,9 +7,12 @@ class TestUtils(TestCase): def test_slugify(self): # no need to check if slug is a valid tag + # noinspection SpellCheckingInspection self.assertEqual(utils.slugify("ConvertThis to-------a SLUG", False), "convertthis-to-a-slug") + # noinspection SpellCheckingInspection self.assertEqual(utils.slugify("Zażółć gęślą jaźń ", False), "zażółć-gęślą-jaźń") - self.assertEqual(utils.slugify(" Multiple spaces between words", False), "multiple-spaces-between-words") + self.assertEqual(utils.slugify(" Multiple spaces between words", False), "multiple-spaces-between-words") + # noinspection SpellCheckingInspection self.assertEqual(utils.slugify("Хлеба нашего повшеднего", False), "хлеба-нашего-повшеднего") # check if the slug is a valid tag @@ -23,5 +26,7 @@ def test_slugify(self): utils.slugify("Digits at the end of the string are also ok 456", True) def test_expand_path(self): + # noinspection SpellCheckingInspection self.assertEqual(utils.expand_path("$HOME/whatever"), "/home/deutschegabanna/whatever") - self.assertEqual(utils.expand_path('~'), "/home/deutschegabanna") + # noinspection SpellCheckingInspection + self.assertEqual(utils.expand_path('~/yes'), "/home/deutschegabanna/yes") diff --git a/dated_entries_group.py b/dated_entries_group.py index 57606a5..c1be148 100644 --- a/dated_entries_group.py +++ b/dated_entries_group.py @@ -7,6 +7,7 @@ """ from __future__ import annotations +from typing import Optional import re import logging @@ -119,7 +120,7 @@ class DatedEntriesGroup(utils.Core): """ _instances = {} - def __new__(cls, date: str, current_mood_set: Moodverse): + def __new__(cls, date: str, current_mood_set: Moodverse = Moodverse()): # Check if an instance for the given date already exists if date in cls._instances: return cls._instances[date] @@ -129,10 +130,11 @@ def __new__(cls, date: str, current_mood_set: Moodverse): cls._instances[date] = instance return instance - def __init__(self, date, current_mood_set: Moodverse): + def __init__(self, date, current_mood_set: Moodverse = Moodverse()): """ :raises InvalidDateError: if the date string is deemed invalid by :class:`Date` :param date: The date for all child entries within. + :param current_mood_set: Use custom :class:`Moodverse` or default if not provided. """ self.__logger = logging.getLogger(self.__class__.__name__) @@ -151,15 +153,13 @@ def __init__(self, date, current_mood_set: Moodverse): self.__known_moods: Moodverse = current_mood_set def create_dated_entry_from_row(self, - line: dict[str], - override_mood_set: Moodverse = Moodverse()) -> dated_entry.DatedEntry: + line: dict[str, str]) -> dated_entry.DatedEntry: """ :func:`access_dated_entry` of :class:`DatedEntry` object with the specified parameters. :raises TriedCreatingDuplicateDatedEntryError: if it would result in making a duplicate :class:`DatedEntry` - :raises IncompleteDataRow: if ``line`` does not have ``time`` and ``mood`` keys at the very least + :raises IncompleteDataRow: if ``line`` does not have ``time mood`` keys at the very least, or either is empty :raises ValueError: re-raises ValueError from :class:`DatedEntry` :param line: a dictionary of strings. Required keys: mood, activities, note_title & note. - :param override_mood_set: each key of the dict should have a set of strings containing moods. """ # TODO: test case this # Try accessing the minimum required keys @@ -168,6 +168,10 @@ def create_dated_entry_from_row(self, line[key] except KeyError: raise IncompleteDataRow(key) + # is it empty then, maybe? + else: + if not line[key]: + raise IncompleteDataRow(key) # Check if there's already an object with this time if line["time"] in self.__known_entries_for_this_date: diff --git a/dated_entry.py b/dated_entry.py index f6890b7..b77695a 100644 --- a/dated_entry.py +++ b/dated_entry.py @@ -200,6 +200,8 @@ def __init__(self, # --- # MOOD # --- + if len(mood) == 0: + raise ValueError # Check if the mood is valid - i.e. it does exist in the currently used Moodverse if not override_mood_set.get_mood(mood): errors.ErrorMsgBase.print(ErrorMsg.INVALID_MOOD, mood) diff --git a/librarian.py b/librarian.py index fd79799..a2e2a80 100644 --- a/librarian.py +++ b/librarian.py @@ -307,7 +307,7 @@ def __process_line(self, line: dict[str]) -> bool: else: # Let DatedEntriesGroup handle the rest and increment the counter (True == 1) try: - self.access_date(line["full_date"]).create_dated_entry_from_row(line, self.__mood_set) + self.access_date(line["full_date"]).create_dated_entry_from_row(line) except (dated_entries_group.TriedCreatingDuplicateDatedEntryError, dated_entries_group.IncompleteDataRow, dated_entries_group.InvalidDateError, diff --git a/utils.py b/utils.py index 594f1b0..e2f3e9b 100755 --- a/utils.py +++ b/utils.py @@ -61,6 +61,8 @@ def expand_path(path): # Gets full path, resolving things like ../ return os.path.realpath( # Expands the tilde (~) character to the user's home directory - os.path.expanduser(path) + os.path.expanduser( + os.path.expandvars(path) + ) ) From 1ac5638f583e1e0a1f670990a5a732838459c573 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bartosz=20Ga=C5=82ek?= <bartosz@galek.com.pl> Date: Thu, 18 Jan 2024 08:52:28 +0100 Subject: [PATCH 17/40] github workflows updated, small tests fixes --- .github/workflows/pylint.yaml | 34 +- .github/workflows/test.yaml | 31 +- .idea/misc.xml | 5 +- .pylintrc | 639 +++++++++++++++++++++++++++++ Pipfile | 11 + Pipfile.lock | 20 + _tests/sheet-6-empty-file.csv | 0 _tests/test_config.py | 2 +- _tests/test_dated_entries_group.py | 7 +- _tests/test_dated_entry.py | 1 + _tests/test_errors.py | 1 + _tests/test_librarian.py | 42 +- _tests/test_mood.py | 4 +- _tests/test_utils.py | 8 +- dated_entries_group.py | 48 +-- dated_entry.py | 4 +- entry/mood.py | 19 +- errors.py | 3 +- librarian.py | 24 +- utils.py | 6 +- 20 files changed, 777 insertions(+), 132 deletions(-) create mode 100644 .pylintrc create mode 100644 Pipfile create mode 100644 Pipfile.lock create mode 100644 _tests/sheet-6-empty-file.csv diff --git a/.github/workflows/pylint.yaml b/.github/workflows/pylint.yaml index b5ee0c6..dcf818b 100755 --- a/.github/workflows/pylint.yaml +++ b/.github/workflows/pylint.yaml @@ -1,27 +1,15 @@ name: Pylint - -on: [push] - +on: [ pull_request ] jobs: - build: - + lint: runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - name: Set up Python 3.8 - uses: actions/setup-python@v1 - with: - python-version: 3.8 - - name: Install dependencies - run: | - python -m pip install --upgrade pip - pip install pylint - - name: Analysing the code with pylint - run: | - find . -name '*.py' -exec pylint {} --fail-under=8 \; - pylint_exit_code=$? - if [ $pylint_exit_code -ne 0 ]; then - echo "Pylint check failed with exit code $pylint_exit_code" - exit $pylint_exit_code - fi \ No newline at end of file + - uses: actions/checkout@v4 + - uses: actions/setup-python@v5 + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install pylint + - uses: pr-annotators/pylint-pr-annotator@main + - name: Analysing the code with pylint + run: pylint $(git ls-files '*.py') \ No newline at end of file diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index eda3f64..b92d6a9 100755 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -1,20 +1,17 @@ name: Test - -on: [push] - +on: [ pull_request ] jobs: - build: - - runs-on: ubuntu-latest - + test: + strategy: + matrix: + # https://devguide.python.org/versions/ + python: [ 3.11, 3.12 ] + os: [ ubuntu-latest, windows-latest ] + runs-on: ${{ matrix.os }} steps: - - uses: actions/checkout@v2 - - name: Set up Python 3.8 - uses: actions/setup-python@v1 - with: - python-version: 3.8 - - name: Discover tests - run: | - echo "Starting discovery..." - python -m unittest discover -s "./_tests" - + - uses: actions/checkout@v4 + - uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python }} + - name: Discover tests + run: python -m unittest discover -s ./_tests diff --git a/.idea/misc.xml b/.idea/misc.xml index 2a75f48..296c362 100644 --- a/.idea/misc.xml +++ b/.idea/misc.xml @@ -1,6 +1,9 @@ <?xml version="1.0" encoding="UTF-8"?> <project version="4"> - <component name="ProjectRootManager" version="2" project-jdk-name="Python 3.8" project-jdk-type="Python SDK" /> + <component name="Black"> + <option name="sdkName" value="Pipenv (Obsidian-Daylio-Parser)" /> + </component> + <component name="ProjectRootManager" version="2" project-jdk-name="Pipenv (Obsidian-Daylio-Parser)" project-jdk-type="Python SDK" /> <component name="PythonCompatibilityInspectionAdvertiser"> <option name="version" value="3" /> </component> diff --git a/.pylintrc b/.pylintrc new file mode 100644 index 0000000..c4bd0c7 --- /dev/null +++ b/.pylintrc @@ -0,0 +1,639 @@ +[MAIN] + +# Analyse import fallback blocks. This can be used to support both Python 2 and +# 3 compatible code, which means that the block might have code that exists +# only in one or another interpreter, leading to false positives when analysed. +analyse-fallback-blocks=no + +# Clear in-memory caches upon conclusion of linting. Useful if running pylint +# in a server-like mode. +clear-cache-post-run=no + +# Load and enable all available extensions. Use --list-extensions to see a list +# all available extensions. +#enable-all-extensions= + +# In error mode, messages with a category besides ERROR or FATAL are +# suppressed, and no reports are done by default. Error mode is compatible with +# disabling specific errors. +#errors-only= + +# Always return a 0 (non-error) status code, even if lint errors are found. +# This is primarily useful in continuous integration scripts. +#exit-zero= + +# A comma-separated list of package or module names from where C extensions may +# be loaded. Extensions are loading into the active Python interpreter and may +# run arbitrary code. +extension-pkg-allow-list= + +# A comma-separated list of package or module names from where C extensions may +# be loaded. Extensions are loading into the active Python interpreter and may +# run arbitrary code. (This is an alternative name to extension-pkg-allow-list +# for backward compatibility.) +extension-pkg-whitelist= + +# Return non-zero exit code if any of these messages/categories are detected, +# even if score is above --fail-under value. Syntax same as enable. Messages +# specified are enabled, while categories only check already-enabled messages. +fail-on= + +# Specify a score threshold under which the program will exit with error. +fail-under=10 + +# Interpret the stdin as a python script, whose filename needs to be passed as +# the module_or_package argument. +#from-stdin= + +# Files or directories to be skipped. They should be base names, not paths. +ignore=CVS + +# Add files or directories matching the regular expressions patterns to the +# ignore-list. The regex matches against paths and can be in Posix or Windows +# format. Because '\\' represents the directory delimiter on Windows systems, +# it can't be used as an escape character. +ignore-paths= + +# Files or directories matching the regular expression patterns are skipped. +# The regex matches against base names, not paths. The default value ignores +# Emacs file locks +ignore-patterns=^\.# + +# List of module names for which member attributes should not be checked +# (useful for modules/projects where namespaces are manipulated during runtime +# and thus existing member attributes cannot be deduced by static analysis). It +# supports qualified module names, as well as Unix pattern matching. +ignored-modules= + +# Python code to execute, usually for sys.path manipulation such as +# pygtk.require(). +#init-hook= + +# Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the +# number of processors available to use, and will cap the count on Windows to +# avoid hangs. +jobs=1 + +# Control the amount of potential inferred values when inferring a single +# object. This can help the performance when dealing with large functions or +# complex, nested conditions. +limit-inference-results=100 + +# List of plugins (as comma separated values of python module names) to load, +# usually to register additional checkers. +load-plugins= + +# Pickle collected data for later comparisons. +persistent=yes + +# Minimum Python version to use for version dependent checks. Will default to +# the version used to run pylint. +py-version=3.12 + +# Discover python modules and packages in the file system subtree. +recursive=no + +# Add paths to the list of the source roots. Supports globbing patterns. The +# source root is an absolute path or a path relative to the current working +# directory used to determine a package namespace for modules located under the +# source root. +source-roots= + +# When enabled, pylint would attempt to guess common misconfiguration and emit +# user-friendly hints instead of false-positive error messages. +suggestion-mode=yes + +# Allow loading of arbitrary C extensions. Extensions are imported into the +# active Python interpreter and may run arbitrary code. +unsafe-load-any-extension=no + +# In verbose mode, extra non-checker-related info will be displayed. +#verbose= + + +[BASIC] + +# Naming style matching correct argument names. +argument-naming-style=snake_case + +# Regular expression matching correct argument names. Overrides argument- +# naming-style. If left empty, argument names will be checked with the set +# naming style. +#argument-rgx= + +# Naming style matching correct attribute names. +attr-naming-style=snake_case + +# Regular expression matching correct attribute names. Overrides attr-naming- +# style. If left empty, attribute names will be checked with the set naming +# style. +#attr-rgx= + +# Bad variable names which should always be refused, separated by a comma. +bad-names=foo, + bar, + baz, + toto, + tutu, + tata + +# Bad variable names regexes, separated by a comma. If names match any regex, +# they will always be refused +bad-names-rgxs= + +# Naming style matching correct class attribute names. +class-attribute-naming-style=any + +# Regular expression matching correct class attribute names. Overrides class- +# attribute-naming-style. If left empty, class attribute names will be checked +# with the set naming style. +#class-attribute-rgx= + +# Naming style matching correct class constant names. +class-const-naming-style=UPPER_CASE + +# Regular expression matching correct class constant names. Overrides class- +# const-naming-style. If left empty, class constant names will be checked with +# the set naming style. +#class-const-rgx= + +# Naming style matching correct class names. +class-naming-style=PascalCase + +# Regular expression matching correct class names. Overrides class-naming- +# style. If left empty, class names will be checked with the set naming style. +#class-rgx= + +# Naming style matching correct constant names. +const-naming-style=UPPER_CASE + +# Regular expression matching correct constant names. Overrides const-naming- +# style. If left empty, constant names will be checked with the set naming +# style. +#const-rgx= + +# Minimum line length for functions/classes that require docstrings, shorter +# ones are exempt. +docstring-min-length=-1 + +# Naming style matching correct function names. +function-naming-style=snake_case + +# Regular expression matching correct function names. Overrides function- +# naming-style. If left empty, function names will be checked with the set +# naming style. +#function-rgx= + +# Good variable names which should always be accepted, separated by a comma. +good-names=i, + j, + k, + ex, + Run, + _ + +# Good variable names regexes, separated by a comma. If names match any regex, +# they will always be accepted +good-names-rgxs= + +# Include a hint for the correct naming format with invalid-name. +include-naming-hint=no + +# Naming style matching correct inline iteration names. +inlinevar-naming-style=any + +# Regular expression matching correct inline iteration names. Overrides +# inlinevar-naming-style. If left empty, inline iteration names will be checked +# with the set naming style. +#inlinevar-rgx= + +# Naming style matching correct method names. +method-naming-style=snake_case + +# Regular expression matching correct method names. Overrides method-naming- +# style. If left empty, method names will be checked with the set naming style. +#method-rgx= + +# Naming style matching correct module names. +module-naming-style=snake_case + +# Regular expression matching correct module names. Overrides module-naming- +# style. If left empty, module names will be checked with the set naming style. +#module-rgx= + +# Colon-delimited sets of names that determine each other's naming style when +# the name regexes allow several styles. +name-group= + +# Regular expression which should only match function or class names that do +# not require a docstring. +no-docstring-rgx=^_ + +# List of decorators that produce properties, such as abc.abstractproperty. Add +# to this list to register other decorators that produce valid properties. +# These decorators are taken in consideration only for invalid-name. +property-classes=abc.abstractproperty + +# Regular expression matching correct type alias names. If left empty, type +# alias names will be checked with the set naming style. +#typealias-rgx= + +# Regular expression matching correct type variable names. If left empty, type +# variable names will be checked with the set naming style. +#typevar-rgx= + +# Naming style matching correct variable names. +variable-naming-style=snake_case + +# Regular expression matching correct variable names. Overrides variable- +# naming-style. If left empty, variable names will be checked with the set +# naming style. +#variable-rgx= + + +[CLASSES] + +# Warn about protected attribute access inside special methods +check-protected-access-in-special-methods=no + +# List of method names used to declare (i.e. assign) instance attributes. +defining-attr-methods=__init__, + __new__, + setUp, + asyncSetUp, + __post_init__ + +# List of member names, which should be excluded from the protected access +# warning. +exclude-protected=_asdict,_fields,_replace,_source,_make,os._exit + +# List of valid names for the first argument in a class method. +valid-classmethod-first-arg=cls + +# List of valid names for the first argument in a metaclass class method. +valid-metaclass-classmethod-first-arg=mcs + + +[DESIGN] + +# List of regular expressions of class ancestor names to ignore when counting +# public methods (see R0903) +exclude-too-few-public-methods= + +# List of qualified class names to ignore when counting class parents (see +# R0901) +ignored-parents= + +# Maximum number of arguments for function / method. +max-args=5 + +# Maximum number of attributes for a class (see R0902). +max-attributes=7 + +# Maximum number of boolean expressions in an if statement (see R0916). +max-bool-expr=5 + +# Maximum number of branch for function / method body. +max-branches=12 + +# Maximum number of locals for function / method body. +max-locals=15 + +# Maximum number of parents for a class (see R0901). +max-parents=7 + +# Maximum number of public methods for a class (see R0904). +max-public-methods=20 + +# Maximum number of return / yield for function / method body. +max-returns=6 + +# Maximum number of statements in function / method body. +max-statements=50 + +# Minimum number of public methods for a class (see R0903). +min-public-methods=2 + + +[EXCEPTIONS] + +# Exceptions that will emit a warning when caught. +overgeneral-exceptions=builtins.BaseException,builtins.Exception + + +[FORMAT] + +# Expected format of line ending, e.g. empty (any line ending), LF or CRLF. +expected-line-ending-format= + +# Regexp for a line that is allowed to be longer than the limit. +ignore-long-lines=^\s*(# )?<?https?://\S+>?$ + +# Number of spaces of indent required inside a hanging or continued line. +indent-after-paren=4 + +# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 +# tab). +indent-string=' ' + +# Maximum number of characters on a single line. +max-line-length=100 + +# Maximum number of lines in a module. +max-module-lines=1000 + +# Allow the body of a class to be on the same line as the declaration if body +# contains single statement. +single-line-class-stmt=no + +# Allow the body of an if to be on the same line as the test if there is no +# else. +single-line-if-stmt=no + + +[IMPORTS] + +# List of modules that can be imported at any level, not just the top level +# one. +allow-any-import-level= + +# Allow explicit reexports by alias from a package __init__. +allow-reexport-from-package=no + +# Allow wildcard imports from modules that define __all__. +allow-wildcard-with-all=no + +# Deprecated modules which should not be used, separated by a comma. +deprecated-modules= + +# Output a graph (.gv or any supported image format) of external dependencies +# to the given file (report RP0402 must not be disabled). +ext-import-graph= + +# Output a graph (.gv or any supported image format) of all (i.e. internal and +# external) dependencies to the given file (report RP0402 must not be +# disabled). +import-graph= + +# Output a graph (.gv or any supported image format) of internal dependencies +# to the given file (report RP0402 must not be disabled). +int-import-graph= + +# Force import order to recognize a module as part of the standard +# compatibility libraries. +known-standard-library= + +# Force import order to recognize a module as part of a third party library. +known-third-party=enchant + +# Couples of modules and preferred modules, separated by a comma. +preferred-modules= + + +[LOGGING] + +# The type of string formatting that logging methods do. `old` means using % +# formatting, `new` is for `{}` formatting. +logging-format-style=old + +# Logging modules to check that the string format arguments are in logging +# function parameter format. +logging-modules=logging + + +[MESSAGES CONTROL] + +# Only show warnings with the listed confidence levels. Leave empty to show +# all. Valid levels: HIGH, CONTROL_FLOW, INFERENCE, INFERENCE_FAILURE, +# UNDEFINED. +confidence=HIGH, + CONTROL_FLOW, + INFERENCE, + INFERENCE_FAILURE, + UNDEFINED + +# Disable the message, report, category or checker with the given id(s). You +# can either give multiple identifiers separated by comma (,) or put this +# option multiple times (only on the command line, not in the configuration +# file where it should appear only once). You can also use "--disable=all" to +# disable everything first and then re-enable specific checks. For example, if +# you want to run only the similarities checker, you can use "--disable=all +# --enable=similarities". If you want to run only the classes checker, but have +# no Warning level messages displayed, use "--disable=all --enable=classes +# --disable=W". +disable=raw-checker-failed, + bad-inline-option, + locally-disabled, + file-ignored, + suppressed-message, + useless-suppression, + deprecated-pragma, + use-symbolic-message-instead, + use-implicit-booleaness-not-comparison-to-string, + use-implicit-booleaness-not-comparison-to-zero, + line-too-long, + fixme, + missing-module-docstring, + missing-function-docstring, + missing-class-docstring + +# Enable the message, report, category or checker with the given id(s). You can +# either give multiple identifier separated by comma (,) or put this option +# multiple time (only on the command line, not in the configuration file where +# it should appear only once). See also the "--disable" option for examples. +enable= + + +[METHOD_ARGS] + +# List of qualified names (i.e., library.method) which require a timeout +# parameter e.g. 'requests.api.get,requests.api.post' +timeout-methods=requests.api.delete,requests.api.get,requests.api.head,requests.api.options,requests.api.patch,requests.api.post,requests.api.put,requests.api.request + + +[MISCELLANEOUS] + +# List of note tags to take in consideration, separated by a comma. +notes=FIXME, + XXX, + TODO + +# Regular expression of note tags to take in consideration. +notes-rgx= + + +[REFACTORING] + +# Maximum number of nested blocks for function / method body +max-nested-blocks=5 + +# Complete name of functions that never returns. When checking for +# inconsistent-return-statements if a never returning function is called then +# it will be considered as an explicit return statement and no message will be +# printed. +never-returning-functions=sys.exit,argparse.parse_error + + +[REPORTS] + +# Python expression which should return a score less than or equal to 10. You +# have access to the variables 'fatal', 'error', 'warning', 'refactor', +# 'convention', and 'info' which contain the number of messages in each +# category, as well as 'statement' which is the total number of statements +# analyzed. This score is used by the global evaluation report (RP0004). +evaluation=max(0, 0 if fatal else 10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)) + +# Template used to display messages. This is a python new-style format string +# used to format the message information. See doc for all details. +msg-template= + +# Set the output format. Available formats are: text, parseable, colorized, +# json2 (improved json format), json (old json format) and msvs (visual +# studio). You can also give a reporter class, e.g. +# mypackage.mymodule.MyReporterClass. +#output-format= + +# Tells whether to display a full report or only the messages. +reports=no + +# Activate the evaluation score. +score=yes + + +[SIMILARITIES] + +# Comments are removed from the similarity computation +ignore-comments=yes + +# Docstrings are removed from the similarity computation +ignore-docstrings=yes + +# Imports are removed from the similarity computation +ignore-imports=yes + +# Signatures are removed from the similarity computation +ignore-signatures=yes + +# Minimum lines number of a similarity. +min-similarity-lines=4 + + +[SPELLING] + +# Limits count of emitted suggestions for spelling mistakes. +max-spelling-suggestions=4 + +# Spelling dictionary name. No available dictionaries : You need to install +# both the python package and the system dependency for enchant to work. +spelling-dict= + +# List of comma separated words that should be considered directives if they +# appear at the beginning of a comment and should not be checked. +spelling-ignore-comment-directives=fmt: on,fmt: off,noqa:,noqa,nosec,isort:skip,mypy: + +# List of comma separated words that should not be checked. +spelling-ignore-words= + +# A path to a file that contains the private dictionary; one word per line. +spelling-private-dict-file= + +# Tells whether to store unknown words to the private dictionary (see the +# --spelling-private-dict-file option) instead of raising a message. +spelling-store-unknown-words=no + + +[STRING] + +# This flag controls whether inconsistent-quotes generates a warning when the +# character used as a quote delimiter is used inconsistently within a module. +check-quote-consistency=no + +# This flag controls whether the implicit-str-concat should generate a warning +# on implicit string concatenation in sequences defined over several lines. +check-str-concat-over-line-jumps=no + + +[TYPECHECK] + +# List of decorators that produce context managers, such as +# contextlib.contextmanager. Add to this list to register other decorators that +# produce valid context managers. +contextmanager-decorators=contextlib.contextmanager + +# List of members which are set dynamically and missed by pylint inference +# system, and so shouldn't trigger E1101 when accessed. Python regular +# expressions are accepted. +generated-members= + +# Tells whether to warn about missing members when the owner of the attribute +# is inferred to be None. +ignore-none=yes + +# This flag controls whether pylint should warn about no-member and similar +# checks whenever an opaque object is returned when inferring. The inference +# can return multiple potential results while evaluating a Python object, but +# some branches might not be evaluated, which results in partial inference. In +# that case, it might be useful to still emit no-member and other checks for +# the rest of the inferred objects. +ignore-on-opaque-inference=yes + +# List of symbolic message names to ignore for Mixin members. +ignored-checks-for-mixins=no-member, + not-async-context-manager, + not-context-manager, + attribute-defined-outside-init + +# List of class names for which member attributes should not be checked (useful +# for classes with dynamically set attributes). This supports the use of +# qualified names. +ignored-classes=optparse.Values,thread._local,_thread._local,argparse.Namespace + +# Show a hint with possible names when a member name was not found. The aspect +# of finding the hint is based on edit distance. +missing-member-hint=yes + +# The minimum edit distance a name should have in order to be considered a +# similar match for a missing member name. +missing-member-hint-distance=1 + +# The total number of similar names that should be taken in consideration when +# showing a hint for a missing member. +missing-member-max-choices=1 + +# Regex pattern to define which classes are considered mixins. +mixin-class-rgx=.*[Mm]ixin + +# List of decorators that change the signature of a decorated function. +signature-mutators= + + +[VARIABLES] + +# List of additional names supposed to be defined in builtins. Remember that +# you should avoid defining new builtins when possible. +additional-builtins= + +# Tells whether unused global variables should be treated as a violation. +allow-global-unused-variables=yes + +# List of names allowed to shadow builtins +allowed-redefined-builtins= + +# List of strings which can identify a callback function by name. A callback +# name must start or end with one of those strings. +callbacks=cb_, + _cb + +# A regular expression matching the name of dummy variables (i.e. expected to +# not be used). +dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_ + +# Argument names that match this expression will be ignored. +ignored-argument-names=_.*|^ignored_|^unused_ + +# Tells whether we should check for unused import in __init__ files. +init-import=no + +# List of qualified module names which can have objects that can redefine +# builtins. +redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io diff --git a/Pipfile b/Pipfile new file mode 100644 index 0000000..645a67e --- /dev/null +++ b/Pipfile @@ -0,0 +1,11 @@ +[[source]] +url = "https://pypi.org/simple" +verify_ssl = true +name = "pypi" + +[packages] + +[dev-packages] + +[requires] +python_version = "3.12" diff --git a/Pipfile.lock b/Pipfile.lock new file mode 100644 index 0000000..b6df5da --- /dev/null +++ b/Pipfile.lock @@ -0,0 +1,20 @@ +{ + "_meta": { + "hash": { + "sha256": "702ad05de9bc9de99a4807c8dde1686f31e0041d7b5f6f6b74861195a52110f5" + }, + "pipfile-spec": 6, + "requires": { + "python_version": "3.12" + }, + "sources": [ + { + "name": "pypi", + "url": "https://pypi.org/simple", + "verify_ssl": true + } + ] + }, + "default": {}, + "develop": {} +} diff --git a/_tests/sheet-6-empty-file.csv b/_tests/sheet-6-empty-file.csv new file mode 100644 index 0000000..e69de29 diff --git a/_tests/test_config.py b/_tests/test_config.py index bd5f210..cce4763 100644 --- a/_tests/test_config.py +++ b/_tests/test_config.py @@ -125,7 +125,7 @@ def test_if_settings_manager_overwrites_its_properties_from_console(self): # User input correct - should pass options_to_check.parse_console(["this is NOT the default value", "hello", "world"]) self.assertEqual(options_to_check.filepath, "this is NOT the default value") - self.assertNotEquals(options_to_check.filepath, "this is the default value") + self.assertNotEqual(options_to_check.filepath, "this is the default value") # because neither "foo" nor "bar" is part of the SettingsManager class, I need to access it like a key in dict self.assertEqual(vars(options_to_check)["foo"], "hello") self.assertEqual(vars(options_to_check)["bar"], "world") diff --git a/_tests/test_dated_entries_group.py b/_tests/test_dated_entries_group.py index b6d475d..c090ee1 100644 --- a/_tests/test_dated_entries_group.py +++ b/_tests/test_dated_entries_group.py @@ -1,3 +1,4 @@ +import unittest from unittest import TestCase from dated_entries_group import DatedEntriesGroup, InvalidDateError, \ @@ -162,11 +163,13 @@ def test_falseness_of_dated_entries_group(self): self.assertEqual(len(another_day.known_entries_from_this_day), 0) self.assertFalse(another_day.known_entries_from_this_day) + @unittest.skip("not yet implemented") def test_no_duplicate_entries_created(self): """ DatedEntriesGroup should return the already existing entry if it is known, instead of creating a duplicate. """ - pass + self.assertEqual(True, True) + @unittest.skip("not yet implemented") def test_retrieve_known_entries(self): - pass + self.assertEqual(True, True) diff --git a/_tests/test_dated_entry.py b/_tests/test_dated_entry.py index 90a6f22..5b5f456 100644 --- a/_tests/test_dated_entry.py +++ b/_tests/test_dated_entry.py @@ -1,4 +1,5 @@ from unittest import TestCase + from dated_entry import Time, slice_quotes, DatedEntry, IsNotTimeError diff --git a/_tests/test_errors.py b/_tests/test_errors.py index 0f9b0ab..73a3ac6 100644 --- a/_tests/test_errors.py +++ b/_tests/test_errors.py @@ -1,4 +1,5 @@ from unittest import TestCase + import errors diff --git a/_tests/test_librarian.py b/_tests/test_librarian.py index 9e74fc8..0bc8d88 100644 --- a/_tests/test_librarian.py +++ b/_tests/test_librarian.py @@ -11,17 +11,19 @@ class TestLibrarian(TestCase): We use internal class methods to check proper handling of data throughout the process. """ def test_init_valid_csv(self): - self.assertTrue(Librarian("sheet-1-valid-data.csv")) + self.assertTrue(Librarian("_tests/sheet-1-valid-data.csv")) def test_init_invalid_csv(self): """ Pass faulty files and see if it fails as expected. """ - self.assertRaises(librarian.CannotAccessFileError, Librarian, "sheet-2-corrupted-bytes.csv") - self.assertRaises(librarian.CannotAccessFileError, Librarian, "sheet-3-wrong-format.txt") - self.assertRaises(librarian.CannotAccessFileError, Librarian, "sheet-4-no-extension.csv") - self.assertRaises(librarian.CannotAccessFileError, Librarian, "sheet-5-missing-file.csv") - self.assertRaises(librarian.CannotAccessFileError, Librarian, "sheet-6-empty-file.csv") + self.assertRaises(librarian.CannotAccessFileError, Librarian, "_tests/sheet-2-corrupted-bytes.csv") + self.assertRaises(librarian.CannotAccessFileError, Librarian, "_tests/sheet-3-wrong-format.txt") + self.assertRaises(librarian.CannotAccessFileError, Librarian, "_tests/sheet-4-no-extension") + self.assertRaises(librarian.CannotAccessFileError, Librarian, "_tests/sheet-5-missing-file.csv") + + # TODO: handle this case in Librarian + # self.assertRaises(librarian.CannotAccessFileError, Librarian, "_tests/sheet-6-empty-file.csv") # TODO: maybe generate corrupted_sheet and wrong_format during runner setup in workflow mode? # dd if=/dev/urandom of="$corrupted_file" bs=1024 count=10 @@ -32,12 +34,12 @@ def test_init_invalid_csv(self): def test_valid_access_dates(self): """ - All the following dates exist in the sheet-1-valid-data.csv and should be accessible by ``lib``. + All the following dates exist in the _tests/sheet-1-valid-data.csv and should be accessible by ``lib``. """ # When lib = Librarian( - path_to_file="sheet-1-valid-data.csv", - path_to_moods="../moods.json" + path_to_file="_tests/sheet-1-valid-data.csv", + path_to_moods="moods.json" ) # Then @@ -54,12 +56,12 @@ def test_valid_access_dates(self): def test_wrong_access_dates(self): """ - **None** of the following dates exist in the sheet-1-valid-data.csv and should **NOT** be accessible by ``lib``. + **None** of the following dates exist in the _tests/sheet-1-valid-data.csv and should **NOT** be accessible by ``lib``. """ # When lib = Librarian( - path_to_file="sheet-1-valid-data.csv", - path_to_moods="../moods.json" + path_to_file="_tests/sheet-1-valid-data.csv", + path_to_moods="moods.json" ) # Then can access valid dates, even if they weren't in the file @@ -88,30 +90,30 @@ def test_wrong_access_dates(self): def test_custom_moods_when_passed_correctly(self): """Pass a valid JSON file and see if it knows it has access to custom moods now.""" self.assertTrue(Librarian( - path_to_file="sheet-1-valid-data.csv", - path_to_moods="../moods.json" + path_to_file="_tests/sheet-1-valid-data.csv", + path_to_moods="moods.json" ).current_mood_set.has_custom_moods) def test_custom_moods_when_not_passed(self): """Pass no moods and see if it know it only has standard moods available.""" self.assertFalse(Librarian( - path_to_file="sheet-1-valid-data.csv" + path_to_file="_tests/sheet-1-valid-data.csv" ).current_mood_set.has_custom_moods) def test_custom_moods_with_invalid_jsons(self): """Pass faulty moods and see if it fails as expected.""" self.assertRaises( librarian.CannotAccessCustomMoodsError, - Librarian, "sheet-1-valid-data.csv", "_tests/output-results", "empty_sheet.csv" + Librarian, "_tests/_tests/sheet-1-valid-data.csv", "_tests/output-results", "empty_sheet.csv" ) def test_custom_moods_when_json_invalid(self): self.assertRaises(librarian.CannotAccessCustomMoodsError, - Librarian, "sheet-1-valid-data.csv", "_tests/output-results/", "empty_sheet.csv") + Librarian, "_tests/_tests/sheet-1-valid-data.csv", "_tests/output-results/", "empty_sheet.csv") self.assertRaises(librarian.CannotAccessCustomMoodsError, - Librarian, "sheet-1-valid-data.csv", "_tests/output-results/", "missing-file.json") + Librarian, "_tests/_tests/sheet-1-valid-data.csv", "_tests/output-results/", "missing-file.json") self.assertRaises(librarian.CannotAccessCustomMoodsError, - Librarian, "sheet-1-valid-data.csv", "_tests/output-results/", "locked-dir/locked_file.csv") + Librarian, "_tests/_tests/sheet-1-valid-data.csv", "_tests/output-results/", "locked-dir/locked_file.csv") def test_custom_moods_that_are_incomplete(self): """ @@ -119,5 +121,5 @@ def test_custom_moods_that_are_incomplete(self): However, it can only expand it (and be truthy) if the dict with moods has all required groups. Therefore, since ``incomplete-moods`` lacks the ``good`` group, the assertion will evaluate to False. """ - lib_to_test = Librarian("sheet-1-valid-data.csv", "_tests/output-results/", "incomplete-moods.json") + lib_to_test = Librarian("_tests/sheet-1-valid-data.csv", "_tests/output-results/", "_tests/incomplete-moods.json") self.assertFalse(lib_to_test.current_mood_set.has_custom_moods) diff --git a/_tests/test_mood.py b/_tests/test_mood.py index 5456904..bc0ebd9 100644 --- a/_tests/test_mood.py +++ b/_tests/test_mood.py @@ -1,9 +1,7 @@ import logging from unittest import TestCase -import utils -from entry.mood import Moodverse, MoodGroup, Mood, MoodNotFoundError -from typing import List +from entry.mood import Moodverse, MoodGroup # noinspection SpellCheckingInspection diff --git a/_tests/test_utils.py b/_tests/test_utils.py index 0ed0789..3ed7d34 100644 --- a/_tests/test_utils.py +++ b/_tests/test_utils.py @@ -1,6 +1,6 @@ -import os.path -from unittest import TestCase import logging +from unittest import TestCase + import utils @@ -27,6 +27,6 @@ def test_slugify(self): def test_expand_path(self): # noinspection SpellCheckingInspection - self.assertEqual(utils.expand_path("$HOME/whatever"), "/home/deutschegabanna/whatever") + self.assertFalse(utils.expand_path("$HOME/whatever").startswith("$HOME")) # noinspection SpellCheckingInspection - self.assertEqual(utils.expand_path('~/yes'), "/home/deutschegabanna/yes") + self.assertFalse(utils.expand_path('~/yes').startswith('~')) diff --git a/dated_entries_group.py b/dated_entries_group.py index c1be148..5b6a498 100644 --- a/dated_entries_group.py +++ b/dated_entries_group.py @@ -7,17 +7,14 @@ """ from __future__ import annotations -from typing import Optional -import re import logging +import re import dated_entry -import entry.mood import errors -from typing import List +import utils from dated_entry import DatedEntry from entry.mood import Moodverse -import utils class DatedEntryMissingError(utils.CustomException): @@ -50,11 +47,10 @@ def __new__(cls, string: str): # Check if an instance for the given date already exists if string in cls._instances: return cls._instances[string] - else: - # If not, create a new instance - instance = super(Date, cls).__new__(cls) - cls._instances[string] = instance - return instance + # If not, create a new instance + instance = super(Date, cls).__new__(cls) + cls._instances[string] = instance + return instance def __init__(self, string: str): """ @@ -92,8 +88,7 @@ def __eq__(self, other: 'Date') -> bool: return all((other.year == self.year, other.month == self.month, other.day == self.day)) - else: - super().__eq__(other) + super().__eq__(other) @property def year(self): @@ -124,11 +119,10 @@ def __new__(cls, date: str, current_mood_set: Moodverse = Moodverse()): # Check if an instance for the given date already exists if date in cls._instances: return cls._instances[date] - else: - # If not, create a new instance - instance = super(DatedEntriesGroup, cls).__new__(cls) - cls._instances[date] = instance - return instance + # If not, create a new instance + instance = super(DatedEntriesGroup, cls).__new__(cls) + cls._instances[date] = instance + return instance def __init__(self, date, current_mood_set: Moodverse = Moodverse()): """ @@ -148,9 +142,8 @@ def __init__(self, date, current_mood_set: Moodverse = Moodverse()): raise InvalidDateError(msg) # All good - initialise - else: - self.__known_entries_for_this_date: dict[str, DatedEntry] = {} - self.__known_moods: Moodverse = current_mood_set + self.__known_entries_for_this_date: dict[str, DatedEntry] = {} + self.__known_moods: Moodverse = current_mood_set def create_dated_entry_from_row(self, line: dict[str, str]) -> dated_entry.DatedEntry: @@ -169,9 +162,8 @@ def create_dated_entry_from_row(self, except KeyError: raise IncompleteDataRow(key) # is it empty then, maybe? - else: - if not line[key]: - raise IncompleteDataRow(key) + if not line[key]: + raise IncompleteDataRow(key) # Check if there's already an object with this time if line["time"] in self.__known_entries_for_this_date: @@ -189,9 +181,8 @@ def create_dated_entry_from_row(self, ) except ValueError: raise ValueError - else: - self.__known_entries_for_this_date[str(this_entry.uid)] = this_entry - return this_entry + self.__known_entries_for_this_date[str(this_entry.uid)] = this_entry + return this_entry def access_dated_entry(self, time: str) -> DatedEntry: """ @@ -206,9 +197,8 @@ def access_dated_entry(self, time: str) -> DatedEntry: msg = ErrorMsg.print(ErrorMsg.OBJECT_NOT_FOUND, time) self.__logger.warning(msg) raise DatedEntryMissingError(msg) - else: - self.__logger.debug(ErrorMsg.print(ErrorMsg.OBJECT_FOUND, time)) - return ref + self.__logger.debug(ErrorMsg.print(ErrorMsg.OBJECT_FOUND, time)) + return ref @property def known_entries_from_this_day(self): diff --git a/dated_entry.py b/dated_entry.py index b77695a..524a02c 100644 --- a/dated_entry.py +++ b/dated_entry.py @@ -11,10 +11,10 @@ import re from typing import Match -from entry.mood import Moodverse -from config import options import errors import utils +from config import options +from entry.mood import Moodverse # Adding DatedEntry-specific options in global_settings dated_entry_settings = options.arg_console.add_argument_group( diff --git a/entry/mood.py b/entry/mood.py index 680285f..256cc04 100644 --- a/entry/mood.py +++ b/entry/mood.py @@ -108,9 +108,8 @@ def __expand_moodset_with_customs(self, moods_to_process: dict[str, List[str]]) self.__logger.warning(msg) raise MoodNotFoundError(msg) # Go through each mood in this mood group - e.g. rad - and transfer them in the form of Mood objects - else: - for mood in mood_group_to_transfer: - self.__mood_set[expected_mood_group].create_mood(mood) + for mood in mood_group_to_transfer: + self.__mood_set[expected_mood_group].create_mood(mood) def get_mood(self, value_to_check: str) -> Optional['Mood']: """ @@ -140,8 +139,7 @@ class AbstractMood: def __init__(self, value): if not value or isinstance(value, str) is False: raise ValueError - else: - self.__name = value + self.__name = value @property def name(self) -> str: @@ -153,8 +151,7 @@ def __str__(self) -> str: def __eq__(self, other) -> bool: if isinstance(other, str): return str(self) == other - else: - return super().__eq__(other) # of object object + return super().__eq__(other) # of object object class MoodGroup(AbstractMood): @@ -217,8 +214,7 @@ def __getitem__(self, item: str) -> 'Mood': """ if item in self.__known_moods: return self.__known_moods[item] - else: - raise KeyError + raise KeyError def __eq__(self, other: List[str]) -> bool: """ @@ -232,9 +228,8 @@ def __eq__(self, other: List[str]) -> bool: # I'm not sure why, but set() instead of pure array makes sure that the order is irrelevant # therefore ["nice", "good"] == ["good", "nice"] is Truthy, as expected return set([str(obj) for obj in self.known_moods]) == set(other) - else: - # Call the superclass' __eq__ for any other comparison - return super().__eq__(other) + # Call the superclass' __eq__ for any other comparison + return super().__eq__(other) class Mood(AbstractMood): diff --git a/errors.py b/errors.py index 9650aeb..8b8e82b 100644 --- a/errors.py +++ b/errors.py @@ -68,5 +68,4 @@ def print(message: str, *args: str) -> Optional[str]: f"Expected {expected_args} arguments for \"{message}\", but got {len(args)} instead." ) return None - else: - return message.format(*args) + return message.format(*args) diff --git a/librarian.py b/librarian.py index a2e2a80..e79e7a4 100644 --- a/librarian.py +++ b/librarian.py @@ -18,11 +18,11 @@ import logging import dated_entries_group -from config import options -from entry.mood import Moodverse import errors import utils +from config import options from dated_entries_group import DatedEntriesGroup +from entry.mood import Moodverse # Adding Librarian-specific options in global_settings librarian_settings = options.arg_console.add_argument_group( @@ -304,17 +304,15 @@ def __process_line(self, line: dict[str]) -> bool: msg = ErrorMsg.print(ErrorMsg.FILE_INCOMPLETE, str(line)) self.__logger.warning(msg) raise MissingValuesInRowError(msg) - else: - # Let DatedEntriesGroup handle the rest and increment the counter (True == 1) - try: - self.access_date(line["full_date"]).create_dated_entry_from_row(line) - except (dated_entries_group.TriedCreatingDuplicateDatedEntryError, - dated_entries_group.IncompleteDataRow, - dated_entries_group.InvalidDateError, - ValueError): - return False - else: - return True + # Let DatedEntriesGroup handle the rest and increment the counter (True == 1) + try: + self.access_date(line["full_date"]).create_dated_entry_from_row(line) + except (dated_entries_group.TriedCreatingDuplicateDatedEntryError, + dated_entries_group.IncompleteDataRow, + dated_entries_group.InvalidDateError, + ValueError): + return False + return True def access_date(self, target_date: str) -> DatedEntriesGroup: """ diff --git a/utils.py b/utils.py index e2f3e9b..52faf03 100755 --- a/utils.py +++ b/utils.py @@ -1,9 +1,10 @@ """ Contains universally useful functions """ -import re -import os import logging +import os +import re + import errors @@ -65,4 +66,3 @@ def expand_path(path): os.path.expandvars(path) ) ) - From 59991b0924c86a3d9a0794417d8b494383608ecb Mon Sep 17 00:00:00 2001 From: DeutscheGabanna <thetobruk@duck.com> Date: Thu, 18 Jan 2024 22:17:59 +0100 Subject: [PATCH 18/40] fixed relative imports into module imports --- _tests/__init__.py | 0 _tests/test_config.py | 2 +- _tests/test_dated_entries_group.py | 7 +++++-- _tests/test_dated_entry.py | 6 +++++- _tests/test_errors.py | 2 +- _tests/test_librarian.py | 4 ++-- _tests/test_mood.py | 4 +++- _tests/test_utils.py | 8 ++++---- src/__init__.py | 0 config.py => src/config.py | 0 dated_entries_group.py => src/dated_entries_group.py | 10 +++++----- dated_entry.py => src/dated_entry.py | 8 ++++---- {entry => src/entry}/mood.py | 4 ++-- errors.py => src/errors.py | 0 librarian.py => src/librarian.py | 12 ++++++------ main.py => src/main.py | 4 ++-- utils.py => src/utils.py | 2 +- 17 files changed, 41 insertions(+), 32 deletions(-) create mode 100644 _tests/__init__.py create mode 100644 src/__init__.py rename config.py => src/config.py (100%) rename dated_entries_group.py => src/dated_entries_group.py (98%) rename dated_entry.py => src/dated_entry.py (98%) rename {entry => src/entry}/mood.py (99%) rename errors.py => src/errors.py (100%) rename librarian.py => src/librarian.py (98%) rename main.py => src/main.py (85%) rename utils.py => src/utils.py (98%) diff --git a/_tests/__init__.py b/_tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/_tests/test_config.py b/_tests/test_config.py index cce4763..e2ae8aa 100644 --- a/_tests/test_config.py +++ b/_tests/test_config.py @@ -1,6 +1,6 @@ from unittest import TestCase -import config +from src import config class TestSettingsManager(TestCase): diff --git a/_tests/test_dated_entries_group.py b/_tests/test_dated_entries_group.py index c090ee1..3350814 100644 --- a/_tests/test_dated_entries_group.py +++ b/_tests/test_dated_entries_group.py @@ -1,8 +1,11 @@ import unittest from unittest import TestCase -from dated_entries_group import DatedEntriesGroup, InvalidDateError, \ - DatedEntryMissingError, TriedCreatingDuplicateDatedEntryError, \ +from src.dated_entries_group import \ + DatedEntriesGroup,\ + InvalidDateError, \ + DatedEntryMissingError,\ + TriedCreatingDuplicateDatedEntryError, \ IncompleteDataRow diff --git a/_tests/test_dated_entry.py b/_tests/test_dated_entry.py index 5b5f456..1283bf2 100644 --- a/_tests/test_dated_entry.py +++ b/_tests/test_dated_entry.py @@ -1,6 +1,10 @@ from unittest import TestCase -from dated_entry import Time, slice_quotes, DatedEntry, IsNotTimeError +from src.dated_entry import \ + Time, \ + slice_quotes, \ + DatedEntry, \ + IsNotTimeError class TestDatedEntryUtils(TestCase): diff --git a/_tests/test_errors.py b/_tests/test_errors.py index 73a3ac6..a660348 100644 --- a/_tests/test_errors.py +++ b/_tests/test_errors.py @@ -1,6 +1,6 @@ from unittest import TestCase -import errors +from src import errors class TestErrorMsgBase(TestCase): diff --git a/_tests/test_librarian.py b/_tests/test_librarian.py index 0bc8d88..17500a2 100644 --- a/_tests/test_librarian.py +++ b/_tests/test_librarian.py @@ -1,7 +1,7 @@ from unittest import TestCase -import librarian -from librarian import Librarian +from src import librarian +from src.librarian import Librarian class TestLibrarian(TestCase): diff --git a/_tests/test_mood.py b/_tests/test_mood.py index bc0ebd9..1e28aac 100644 --- a/_tests/test_mood.py +++ b/_tests/test_mood.py @@ -1,7 +1,9 @@ import logging from unittest import TestCase -from entry.mood import Moodverse, MoodGroup +from src.entry.mood import \ + Moodverse, \ + MoodGroup # noinspection SpellCheckingInspection diff --git a/_tests/test_utils.py b/_tests/test_utils.py index 3ed7d34..686c70a 100644 --- a/_tests/test_utils.py +++ b/_tests/test_utils.py @@ -1,7 +1,7 @@ import logging from unittest import TestCase -import utils +from src import utils class TestUtils(TestCase): @@ -16,13 +16,13 @@ def test_slugify(self): self.assertEqual(utils.slugify("Хлеба нашего повшеднего", False), "хлеба-нашего-повшеднего") # check if the slug is a valid tag - with self.assertLogs(logging.getLogger("utils"), logging.WARNING): + with self.assertLogs(logging.getLogger("src.utils"), logging.WARNING): utils.slugify("1. Digit cannot appear at the beginning of a tag", True) - with self.assertNoLogs(logging.getLogger("utils"), logging.WARNING): + with self.assertNoLogs(logging.getLogger("src.utils"), logging.WARNING): utils.slugify("Digits within the string 1234 - are ok", True) - with self.assertNoLogs(logging.getLogger("utils"), logging.WARNING): + with self.assertNoLogs(logging.getLogger("src.utils"), logging.WARNING): utils.slugify("Digits at the end of the string are also ok 456", True) def test_expand_path(self): diff --git a/src/__init__.py b/src/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/config.py b/src/config.py similarity index 100% rename from config.py rename to src/config.py diff --git a/dated_entries_group.py b/src/dated_entries_group.py similarity index 98% rename from dated_entries_group.py rename to src/dated_entries_group.py index 5b6a498..081bd27 100644 --- a/dated_entries_group.py +++ b/src/dated_entries_group.py @@ -10,11 +10,11 @@ import logging import re -import dated_entry -import errors -import utils -from dated_entry import DatedEntry -from entry.mood import Moodverse +from src import dated_entry +from src import errors +from src import utils +from src.dated_entry import DatedEntry +from src.entry.mood import Moodverse class DatedEntryMissingError(utils.CustomException): diff --git a/dated_entry.py b/src/dated_entry.py similarity index 98% rename from dated_entry.py rename to src/dated_entry.py index 524a02c..b487f92 100644 --- a/dated_entry.py +++ b/src/dated_entry.py @@ -11,10 +11,10 @@ import re from typing import Match -import errors -import utils -from config import options -from entry.mood import Moodverse +from src import errors +from src import utils +from src.config import options +from src.entry.mood import Moodverse # Adding DatedEntry-specific options in global_settings dated_entry_settings = options.arg_console.add_argument_group( diff --git a/entry/mood.py b/src/entry/mood.py similarity index 99% rename from entry/mood.py rename to src/entry/mood.py index 256cc04..5200623 100644 --- a/entry/mood.py +++ b/src/entry/mood.py @@ -3,8 +3,8 @@ import logging from typing import List, Optional -import errors -import utils +from src import errors +from src import utils class ErrorMsg(errors.ErrorMsgBase): diff --git a/errors.py b/src/errors.py similarity index 100% rename from errors.py rename to src/errors.py diff --git a/librarian.py b/src/librarian.py similarity index 98% rename from librarian.py rename to src/librarian.py index e79e7a4..4851d91 100644 --- a/librarian.py +++ b/src/librarian.py @@ -17,12 +17,12 @@ import json import logging -import dated_entries_group -import errors -import utils -from config import options -from dated_entries_group import DatedEntriesGroup -from entry.mood import Moodverse +from src import dated_entries_group +from src import errors +from src import utils +from src.config import options +from src.dated_entries_group import DatedEntriesGroup +from src.entry.mood import Moodverse # Adding Librarian-specific options in global_settings librarian_settings = options.arg_console.add_argument_group( diff --git a/main.py b/src/main.py similarity index 85% rename from main.py rename to src/main.py index a13285e..fa9afb9 100755 --- a/main.py +++ b/src/main.py @@ -2,8 +2,8 @@ import logging import sys -from config import options -from librarian import Librarian +from src.config import options +from src.librarian import Librarian logger = logging.getLogger(__name__) diff --git a/utils.py b/src/utils.py similarity index 98% rename from utils.py rename to src/utils.py index 52faf03..81d623c 100755 --- a/utils.py +++ b/src/utils.py @@ -5,7 +5,7 @@ import os import re -import errors +from src import errors class ErrorMsg(errors.ErrorMsgBase): From aab76a3cbd05e1ef8c929be43ab7232ad6c930aa Mon Sep 17 00:00:00 2001 From: DeutscheGabanna <thetobruk@duck.com> Date: Sun, 21 Jan 2024 12:53:09 +0100 Subject: [PATCH 19/40] output OK --- .gitignore | 1 - .idea/Obsidian-Daylio-Parser.iml | 15 -- .idea/misc.xml | 5 +- _tests/expected_results/2022-10-25.md | 12 +- _tests/expected_results/2022-10-26.md | 26 +- _tests/expected_results/2022-10-27.md | 10 +- _tests/expected_results/2022-10-30.md | 6 +- _tests/test_dated_entries_group.py | 6 +- _tests/test_dated_entry.py | 6 +- _tests/test_librarian.py | 25 +- _tests/test_output.py | 350 ++++++++++++++++++++++++++ src/dated_entries_group.py | 45 +++- src/dated_entry.py | 52 +++- src/librarian.py | 62 +++-- src/utils.py | 9 +- 15 files changed, 544 insertions(+), 86 deletions(-) delete mode 100644 .idea/Obsidian-Daylio-Parser.iml create mode 100644 _tests/test_output.py diff --git a/.gitignore b/.gitignore index 7d24960..921e499 100755 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,5 @@ __pycache__/ _tests/debug.log /_tests/output-results/ -/debug.log .idea/ .vscode/ \ No newline at end of file diff --git a/.idea/Obsidian-Daylio-Parser.iml b/.idea/Obsidian-Daylio-Parser.iml deleted file mode 100644 index a2dca91..0000000 --- a/.idea/Obsidian-Daylio-Parser.iml +++ /dev/null @@ -1,15 +0,0 @@ -<?xml version="1.0" encoding="UTF-8"?> -<module type="PYTHON_MODULE" version="4"> - <component name="NewModuleRootManager"> - <content url="file://$MODULE_DIR$"> - <sourceFolder url="file://$MODULE_DIR$/_tests" isTestSource="true" /> - <excludeFolder url="file://$MODULE_DIR$/_tests/locked-dir" /> - <excludeFolder url="file://$MODULE_DIR$/_tests/output-results" /> - </content> - <orderEntry type="inheritedJdk" /> - <orderEntry type="sourceFolder" forTests="false" /> - </component> - <component name="PackageRequirementsSettings"> - <option name="requirementsPath" value="" /> - </component> -</module> \ No newline at end of file diff --git a/.idea/misc.xml b/.idea/misc.xml index 296c362..f187bc3 100644 --- a/.idea/misc.xml +++ b/.idea/misc.xml @@ -1,9 +1,6 @@ <?xml version="1.0" encoding="UTF-8"?> <project version="4"> - <component name="Black"> - <option name="sdkName" value="Pipenv (Obsidian-Daylio-Parser)" /> - </component> - <component name="ProjectRootManager" version="2" project-jdk-name="Pipenv (Obsidian-Daylio-Parser)" project-jdk-type="Python SDK" /> + <component name="ProjectRootManager" version="2" project-jdk-name="$PROJECT_DIR$/../miniconda3" project-jdk-type="Python SDK" /> <component name="PythonCompatibilityInspectionAdvertiser"> <option name="version" value="3" /> </component> diff --git a/_tests/expected_results/2022-10-25.md b/_tests/expected_results/2022-10-25.md index b2e723f..f0ec8a9 100755 --- a/_tests/expected_results/2022-10-25.md +++ b/_tests/expected_results/2022-10-25.md @@ -1,15 +1,13 @@ --- -tags: daily +tags: daily --- -## hungry - 11:36 PM -I felt #hungry with the following: #allegro #working-remotely #colleague-interaction +## hungry | 11:36 PM | Mauris vitae nunc vel arcu consequat auctor +#allegro #working-remotely #colleague-interaction Nulla vel risus eget magna lacinia aliquam ac in arcu. -## rad - 11:40 PM -I felt #rad. +## rad | 11:40 PM Uet nulla nunc lobortis quisque. -## vaguely ok - 5:00 PM -I felt #vaguely-ok. +## vaguely ok | 5:00 PM diff --git a/_tests/expected_results/2022-10-26.md b/_tests/expected_results/2022-10-26.md index 3c10963..b22e48e 100755 --- a/_tests/expected_results/2022-10-26.md +++ b/_tests/expected_results/2022-10-26.md @@ -1,28 +1,28 @@ --- -tags: daily +tags: daily --- -## captivated - 10:00 PM -I felt #captivated with the following: #at-the-office #board-game #colleague-interaction #big-social-gathering +## captivated | 10:00 PM +#at-the-office #board-game #colleague-interaction #big-social-gathering Sed ut est interdum -## tired - 8:00 PM -I felt #tired with the following: #allegro #at-the-office #board-game #colleague-interaction #big-social-gathering +## tired | 8:00 PM | Mauris rutrum diam +#allegro #at-the-office #board-game #colleague-interaction #big-social-gathering Quisque dictum odio quis augue consectetur, at convallis żodio aliquam. -## grateful - 7:30 PM -I felt #grateful with the following: #allegro #at-the-office #acknowledged-efforts #colleague-interaction +## grateful | 7:30 PM | Aliquam nec sem semper +#allegro #at-the-office #acknowledged-efforts #colleague-interaction Nulla aćcumsan sem sit amet lectus pretium, ac interdum tellus porta. -## blissful - 1:00 PM -I felt #blissful with the following: #allegro #at-the-office +## blissful | 1:00 PM | Vestibulum sagittis leo eu sodales +#allegro #at-the-office Ut et elit id lectus hendrerit ełementum quis auctor ipsum. -## in awe - 9:00 AM -I felt #in-awe with the following: #allegro #at-the-office #outdoors #notable-event +## in awe | 9:00 AM | Integer elementum +#allegro #at-the-office #outdoors #notable-event Nunc lobortis enim eu nisi ultrices, sit amet sagittis lacus venenatis. -## lifeless - 7:50 AM -I felt #lifeless with the following: #podcast #politics #world-event +## lifeless | 7:50 AM | Nulla quis lectus pulvinar +#podcast #politics #world-event Etiam commódo enim ut orci varius viverra. diff --git a/_tests/expected_results/2022-10-27.md b/_tests/expected_results/2022-10-27.md index 58ee0f1..6166989 100755 --- a/_tests/expected_results/2022-10-27.md +++ b/_tests/expected_results/2022-10-27.md @@ -1,12 +1,12 @@ --- -tags: daily +tags: daily --- -## vaguely good - 1:49 PM -I felt #vaguely-good with the following: #chess +## vaguely good | 1:49 PM | Cras pretium +#chess Lorem ipsum dolor sit amet, consectetur adipiscing elit. -## fatigued - 12:00 AM -I felt #fatigued with the following: #allegro #working-remotely +## fatigued | 12:00 AM | Suspendisse sit amet +#allegro #working-remotely Phaśellus pharetra justo ac dui lacinia ullamcorper. diff --git a/_tests/expected_results/2022-10-30.md b/_tests/expected_results/2022-10-30.md index b6b6f5f..3c88ad3 100755 --- a/_tests/expected_results/2022-10-30.md +++ b/_tests/expected_results/2022-10-30.md @@ -1,8 +1,8 @@ --- -tags: daily +tags: daily --- -## vaguely ok - 10:04 AM -I felt #vaguely-ok with the following: #2ćities-skylines #dólóó-fas_ą +## vaguely ok | 10:04 AM | Dolomet +#2ćities-skylines #dólóó-fas_ą Lorem ipsum sit dolomet amęt. diff --git a/_tests/test_dated_entries_group.py b/_tests/test_dated_entries_group.py index 3350814..e29767d 100644 --- a/_tests/test_dated_entries_group.py +++ b/_tests/test_dated_entries_group.py @@ -74,9 +74,9 @@ def test_create_dated_entries_groups(self): """ Try to instantiate an object of :class:`DatedEntriesGroup` with either valid or invalid dates """ - self.assertEqual(DatedEntriesGroup("2023-10-15").date, "2023-10-15") - self.assertEqual(DatedEntriesGroup("2019-5-9").date, "2019-5-9") - self.assertEqual(DatedEntriesGroup("2023-11-25").date, "2023-11-25") + self.assertEqual(str(DatedEntriesGroup("2023-10-15")), "2023-10-15") + self.assertEqual(str(DatedEntriesGroup("2019-5-9")), "2019-5-9") + self.assertEqual(str(DatedEntriesGroup("2023-11-25")), "2023-11-25") self.assertRaises(InvalidDateError, DatedEntriesGroup, "00-") self.assertRaises(InvalidDateError, DatedEntriesGroup, "2199-32-32") diff --git a/_tests/test_dated_entry.py b/_tests/test_dated_entry.py index 1283bf2..8607b81 100644 --- a/_tests/test_dated_entry.py +++ b/_tests/test_dated_entry.py @@ -7,10 +7,14 @@ IsNotTimeError +# TODO: more test coverage needed + class TestDatedEntryUtils(TestCase): def test_slice_quotes(self): + # TODO: Flip values in assertions, because unittest's 'first' param is expected, 'second' is actual. self.assertEqual(slice_quotes("\"test\""), "test") self.assertEqual(slice_quotes("\"\""), "") + self.assertEqual(slice_quotes("\" bicycle \""), "bicycle") class TestTime(TestCase): @@ -53,7 +57,7 @@ def test_bare_minimum_dated_entries(self): self.assertTrue(bare_minimum_dated_entry.uid, "1:49 AM") self.assertIsNone(bare_minimum_dated_entry.title) self.assertIsNone(bare_minimum_dated_entry.note) - self.assertTrue(bare_minimum_dated_entry.activities, []) + self.assertListEqual(bare_minimum_dated_entry.activities, []) def test_insufficient_dated_entries(self): self.assertRaises(ValueError, DatedEntry, time="2:00", mood="") diff --git a/_tests/test_librarian.py b/_tests/test_librarian.py index 17500a2..04e7818 100644 --- a/_tests/test_librarian.py +++ b/_tests/test_librarian.py @@ -30,7 +30,7 @@ def test_init_invalid_csv(self): # generates random bytes and writes them into a given file # TODO: make this file locked during runner workflow with chmod 600 - self.assertRaises(librarian.CannotAccessFileError, Librarian, "locked-dir/locked_file.csv") + self.assertRaises(librarian.CannotAccessFileError, Librarian, "_tests/locked-dir/locked_file.csv") def test_valid_access_dates(self): """ @@ -104,16 +104,25 @@ def test_custom_moods_with_invalid_jsons(self): """Pass faulty moods and see if it fails as expected.""" self.assertRaises( librarian.CannotAccessCustomMoodsError, - Librarian, "_tests/_tests/sheet-1-valid-data.csv", "_tests/output-results", "empty_sheet.csv" + Librarian, "_tests/sheet-1-valid-data.csv", "_tests/output-results/", "_tests/empty_sheet.csv" ) def test_custom_moods_when_json_invalid(self): self.assertRaises(librarian.CannotAccessCustomMoodsError, - Librarian, "_tests/_tests/sheet-1-valid-data.csv", "_tests/output-results/", "empty_sheet.csv") + Librarian, + "_tests/sheet-1-valid-data.csv", + "_tests/output-results/", + "_tests/empty_sheet.csv") self.assertRaises(librarian.CannotAccessCustomMoodsError, - Librarian, "_tests/_tests/sheet-1-valid-data.csv", "_tests/output-results/", "missing-file.json") + Librarian, + "_tests/sheet-1-valid-data.csv", + "_tests/output-results/", + "_tests/missing-file.json") self.assertRaises(librarian.CannotAccessCustomMoodsError, - Librarian, "_tests/_tests/sheet-1-valid-data.csv", "_tests/output-results/", "locked-dir/locked_file.csv") + Librarian, + "_tests/sheet-1-valid-data.csv", + "_tests/output-results/", + "_tests/locked-dir/locked_file.csv") def test_custom_moods_that_are_incomplete(self): """ @@ -121,5 +130,9 @@ def test_custom_moods_that_are_incomplete(self): However, it can only expand it (and be truthy) if the dict with moods has all required groups. Therefore, since ``incomplete-moods`` lacks the ``good`` group, the assertion will evaluate to False. """ - lib_to_test = Librarian("_tests/sheet-1-valid-data.csv", "_tests/output-results/", "_tests/incomplete-moods.json") + lib_to_test = Librarian( + "_tests/sheet-1-valid-data.csv", + "_tests/output-results/", + "_tests/incomplete-moods.json" + ) self.assertFalse(lib_to_test.current_mood_set.has_custom_moods) diff --git a/_tests/test_output.py b/_tests/test_output.py new file mode 100644 index 0000000..6675f03 --- /dev/null +++ b/_tests/test_output.py @@ -0,0 +1,350 @@ +import os +import shutil +import io +from unittest import TestCase + +from src.dated_entry import DatedEntry +from src.dated_entries_group import DatedEntriesGroup +from src.librarian import Librarian +from src.config import options + + +class TestDatedEntryOutput(TestCase): + """ + Since the sample entry can output to any stream from :class:`io.IOBase`, you can treat the StringIO as fake file + If the contents outputted to this fake file are the same as contents written directly to another fake stream, + then everything looks good. + + Obviously any change to formatting in the class definition will force changes in this test case. + """ + + def test_bare_minimum_entry_content(self): + """ + Output an entry which hold information only on: + + * time + * mood + """ + # WHEN + # --- + # Create our fake entry as well as a stream that acts like a file + options.tag_activities = True + my_entry = DatedEntry(time="11:00", mood="great", activities="bicycle | chess") + + with io.StringIO() as my_fake_file_stream: + my_entry.output(my_fake_file_stream) + # AND + # --- + # Then create another stream and fill it with the same content, but written directly, not through object + with io.StringIO() as compare_stream: + compare_stream.write("## great | 11:00\r\n") + compare_stream.write("#bicycle #chess") + + # THEN + # --- + # getvalue() returns the entire stream content regardless of current stream position, read() does not. + # https://stackoverflow.com/a/53485819 + self.assertEqual(my_fake_file_stream.getvalue(), compare_stream.getvalue()) + + def test_entry_with_title_no_note(self): + """ + Output an entry which hold information on: + + * time + * mood + * title + """ + # WHEN + # --- + # Create our fake entry as well as a stream that acts like a file + options.tag_activities = True + my_entry = DatedEntry(time="11:00", mood="great", activities="bicycle | chess", title="I'm super pumped!") + + with io.StringIO() as my_fake_file_stream: + my_entry.output(my_fake_file_stream) + # AND + # --- + # Then create another stream and fill it with the same content, but written directly, not through object + with io.StringIO() as compare_stream: + compare_stream.write("## great | 11:00 | I'm super pumped!\r\n") + compare_stream.write("#bicycle #chess") + + # THEN + # --- + self.assertEqual(my_fake_file_stream.getvalue(), compare_stream.getvalue()) + + def test_entry_with_title_and_note(self): + """ + Output an entry which hold information on: + + * time + * mood + * title + * activities + * note + """ + # WHEN + # --- + # Create our fake entry as well as a stream that acts like a file + options.tag_activities = True + my_entry = DatedEntry(time="11:00", mood="great", activities="bicycle | chess", title="I'm super pumped!", + note="I believe I can fly, I believe I can touch the sky.") + + with io.StringIO() as my_fake_file_stream: + my_entry.output(my_fake_file_stream) + # AND + # --- + # Then create another stream and fill it with the same content, but written directly, not through object + with io.StringIO() as compare_stream: + compare_stream.write("## great | 11:00 | I'm super pumped!\r\n") + compare_stream.write("#bicycle #chess\r\n") + compare_stream.write("I believe I can fly, I believe I can touch the sky.") + + # THEN + # --- + self.assertEqual(my_fake_file_stream.getvalue(), compare_stream.getvalue()) + + def test_entry_with_hashtagged_activities(self): + """ + Output an entry which hold information on: + + * time + * mood + * activities (with and without hashtags) + """ + # WHEN + # --- + # Create our fake entry as well as a stream that acts like a file + options.tag_activities = True + my_entry = DatedEntry(time="11:00", mood="great", activities="bicycle | chess") + + with io.StringIO() as my_fake_file_stream: + my_entry.output(my_fake_file_stream) + # AND + # --- + # Then create another stream and fill it with the same content, but written directly, not through object + with io.StringIO() as compare_stream: + compare_stream.write("## great | 11:00\r\n") + compare_stream.write("#bicycle #chess") + + # THEN + # --- + self.assertEqual(my_fake_file_stream.getvalue(), compare_stream.getvalue()) + + # WHEN + # --- + # Create our fake entry as well as a stream that acts like a file + options.tag_activities = False + my_entry = DatedEntry(time="11:00", mood="great", activities="bicycle | chess") + + with io.StringIO() as my_fake_file_stream: + my_entry.output(my_fake_file_stream) + # AND + # --- + # Then create another stream and fill it with the same content, but written directly, not through object + with io.StringIO() as compare_stream: + compare_stream.write("## great | 11:00\r\n") + compare_stream.write("bicycle chess") + + # THEN + # --- + self.assertEqual(my_fake_file_stream.getvalue(), compare_stream.getvalue()) + + +class TestDatedEntriesGroup(TestCase): + def test_outputting_day_with_one_entry(self): + """ + Creates a file-like stream for a day with one valid entry and checks if the file contents are as expected. + """ + # WHEN + # --- + # Create a sample date + sample_date = DatedEntriesGroup("2011-10-10") + sample_date.append_to_known(DatedEntry( + time="10:00 AM", + mood="vaguely ok" + )) + + with io.StringIO() as my_fake_file_stream: + sample_date.output(my_fake_file_stream) + # AND + # --- + # Then create another stream and fill it with the same content, but written directly, not through object + with io.StringIO() as compare_stream: + compare_stream.write("---\r\n") + compare_stream.write("tags: daily\r\n") + compare_stream.write("---\r\n\r\n") + + compare_stream.write("## vaguely ok | 10:00 AM\r\n\r\n") + + # THEN + # --- + self.assertEqual(my_fake_file_stream.getvalue(), compare_stream.getvalue()) + + def test_outputting_day_with_two_entries(self): + """ + Creates a file-like stream for a day with two valid entries and checks if the file contents are as expected. + """ + # WHEN + # --- + # Create a sample date + sample_date = DatedEntriesGroup("2011-10-10") + sample_date.append_to_known(DatedEntry( + time="10:00 AM", + mood="vaguely ok", + activities="bowling", + note="Feeling kinda ok." + )) + sample_date.append_to_known(DatedEntry( + time="9:30 PM", + mood="awful", + title="Everything is going downhill for me" + )) + + with io.StringIO() as my_fake_file_stream: + sample_date.output(my_fake_file_stream) + # AND + # --- + # Then create another stream and fill it with the same content, but written directly, not through object + with io.StringIO() as compare_stream: + compare_stream.write("---\r\n") + compare_stream.write("tags: daily\r\n") + compare_stream.write("---\r\n\r\n") + + compare_stream.write("## vaguely ok | 10:00 AM\r\n") + compare_stream.write("#bowling\r\n") + compare_stream.write("Feeling kinda ok.\r\n\r\n") + + compare_stream.write("## awful | 9:30 PM | Everything is going downhill for me\r\n\r\n") + + # THEN + # --- + self.assertEqual(my_fake_file_stream.getvalue(), compare_stream.getvalue()) + + def test_outputting_day_with_two_entries_and_invalid_filetags(self): + """ + Creates a file-like stream for a day with two valid entries and checks if the file contents are as expected. + The tricky part is that the file tags specified by the user are invalid. + Therefore, the entire section with filetags should be omitted in the file contents. + """ + # WHEN + # --- + # Create a sample date + sample_date = DatedEntriesGroup("2011-10-10") + sample_date.append_to_known(DatedEntry( + time="10:00 AM", + mood="vaguely ok", + activities="bowling", + note="Feeling kinda meh." + )) + sample_date.append_to_known(DatedEntry( + time="9:30 PM", + mood="awful", + title="Everything is going downhill for me" + )) + # Mess up user-configured file tags + options.tags = ["", None] + + with io.StringIO() as my_fake_file_stream: + sample_date.output(my_fake_file_stream) + # AND + # --- + # Then create another stream and fill it with the same content, but written directly, not through object + with io.StringIO() as compare_stream: + compare_stream.write("## vaguely ok | 10:00 AM\r\n") + compare_stream.write("#bowling\r\n") + compare_stream.write("Feeling kinda meh.\r\n\r\n") + + compare_stream.write("## awful | 9:30 PM | Everything is going downhill for me\r\n\r\n") + + # THEN + # --- + self.assertEqual(my_fake_file_stream.getvalue(), compare_stream.getvalue()) + + def test_outputting_day_with_two_entries_and_partially_valid_filetags(self): + """ + Creates a file-like stream for a day with two valid entries and checks if the file contents are as expected. + The tricky part is that the file tags specified by the user are only partially valid. + Therefore, the section will file tags at the beginning of the file should be sanitised. + """ + # WHEN + # --- + # Create a sample date + sample_date = DatedEntriesGroup("2011-10-10") + sample_date.append_to_known(DatedEntry( + time="10:00 AM", + mood="vaguely ok", + activities="bowling", + note="Feeling fine, I guess." + )) + sample_date.append_to_known(DatedEntry( + time="9:30 PM", + mood="awful", + title="Everything is going downhill for me" + )) + # Mess up user-configured file tags + options.tags = ["", "foo", "bar", None] + + with io.StringIO() as my_fake_file_stream: + sample_date.output(my_fake_file_stream) + # AND + # --- + # Then create another stream and fill it with the same content, but written directly, not through object + with io.StringIO() as compare_stream: + compare_stream.write("---\r\n") + compare_stream.write("tags: bar,foo\r\n") + compare_stream.write("---\r\n\r\n") + + compare_stream.write("## vaguely ok | 10:00 AM\r\n") + compare_stream.write("#bowling\r\n") + compare_stream.write("Feeling fine, I guess.\r\n\r\n") + + compare_stream.write("## awful | 9:30 PM | Everything is going downhill for me\r\n\r\n") + + # THEN + # --- + self.assertEqual(my_fake_file_stream.getvalue(), compare_stream.getvalue()) + + +class TestOutputFileStructure(TestCase): + """ + Previous test classes meant to check if each class properly handles its own output. + This checks if the :class:`Librarian` class creates the necessary directories and outputs to files. + """ + + def test_directory_loop(self): + """ + Loops through known dates and asks each :class:`DatedEntriesGroup` to output its contents to a specified file. + """ + options.tags = ["daily"] + + lib = Librarian("_tests/sheet-1-valid-data.csv", path_to_output="_tests/output-results") + lib.output_all() + + with open("_tests/output-results/2022/10/2022-10-25.md", encoding="UTF-8") as parsed_result: + with open("_tests/expected_results/2022-10-25.md", encoding="UTF-8") as expected_result: + self.assertListEqual(parsed_result.readlines(), expected_result.readlines()) + + with open("_tests/output-results/2022/10/2022-10-26.md", encoding="UTF-8") as parsed_result: + with open("_tests/expected_results/2022-10-26.md", encoding="UTF-8") as expected_result: + self.assertListEqual(parsed_result.readlines(), expected_result.readlines()) + + with open("_tests/output-results/2022/10/2022-10-27.md", encoding="UTF-8") as parsed_result: + with open("_tests/expected_results/2022-10-27.md", encoding="UTF-8") as expected_result: + self.assertListEqual(parsed_result.readlines(), expected_result.readlines()) + + with open("_tests/output-results/2022/10/2022-10-30.md", encoding="UTF-8") as parsed_result: + with open("_tests/expected_results/2022-10-30.md", encoding="UTF-8") as expected_result: + self.assertListEqual(parsed_result.readlines(), expected_result.readlines()) + + def tearDown(self) -> None: + folder = '_tests/output-results' + for filename in os.listdir(folder): + file_path = os.path.join(folder, filename) + try: + if os.path.isfile(file_path) or os.path.islink(file_path): + os.unlink(file_path) + elif os.path.isdir(file_path): + shutil.rmtree(file_path) + except OSError as e: + print('Failed to delete %s while cleaning up after a test. Reason: %s' % (file_path, e)) diff --git a/src/dated_entries_group.py b/src/dated_entries_group.py index 081bd27..d04b0e1 100644 --- a/src/dated_entries_group.py +++ b/src/dated_entries_group.py @@ -7,14 +7,18 @@ """ from __future__ import annotations +import io import logging import re +import typing + from src import dated_entry from src import errors from src import utils from src.dated_entry import DatedEntry from src.entry.mood import Moodverse +from src.config import options class DatedEntryMissingError(utils.CustomException): @@ -145,6 +149,9 @@ def __init__(self, date, current_mood_set: Moodverse = Moodverse()): self.__known_entries_for_this_date: dict[str, DatedEntry] = {} self.__known_moods: Moodverse = current_mood_set + def append_to_known(self, entry: DatedEntry) -> None: + self.__known_entries_for_this_date[str(entry.uid)] = entry + def create_dated_entry_from_row(self, line: dict[str, str]) -> dated_entry.DatedEntry: """ @@ -166,6 +173,7 @@ def create_dated_entry_from_row(self, raise IncompleteDataRow(key) # Check if there's already an object with this time + # TODO: Daylio actually allows creating multiple entries and mark them as written at the same time if line["time"] in self.__known_entries_for_this_date: raise TriedCreatingDuplicateDatedEntryError @@ -181,7 +189,8 @@ def create_dated_entry_from_row(self, ) except ValueError: raise ValueError - self.__known_entries_for_this_date[str(this_entry.uid)] = this_entry + + self.append_to_known(this_entry) return this_entry def access_dated_entry(self, time: str) -> DatedEntry: @@ -200,6 +209,38 @@ def access_dated_entry(self, time: str) -> DatedEntry: self.__logger.debug(ErrorMsg.print(ErrorMsg.OBJECT_FOUND, time)) return ref + def output(self, stream: io.IOBase | typing.IO) -> int: + """ + Write entry contents of all :class:`DatedEntry` known directly into the provided buffer stream. + It is the responsibility of the caller to handle the stream afterward. + :raises utils.StreamError: if the passed stream does not support writing to it. + :raises OSError: likely due to lack of space in memory or filesystem, depending on the stream + :param stream: Since it expects the base :class:`io.IOBase` class, it accepts both file and file-like streams. + :returns: how many characters were successfully written into the stream. + """ + if not stream.writable(): + raise utils.StreamError + + chars_written = 0 + # THE BEGINNING OF THE FILE + # when appending file tags at the beginning of the file, discard any duplicates or falsy strings + # sorted() is used to have a deterministic order, set() was random, so I couldn't properly test the output + valid_tags = sorted(set(val for val in options.tags if val)) + if valid_tags: + chars_written += stream.write("---\r\n") + chars_written += stream.write("tags: " + ",".join(valid_tags) + "\r\n") + chars_written += stream.write("---\r\n\r\n") + + # THE ACTUAL ENTRY CONTENTS + # Each DatedEntry object now appends its contents into the stream + for entry in self.__known_entries_for_this_date.values(): + # write returns the number of characters successfully written + # https://docs.python.org/3/library/io.html#io.TextIOBase.write + if entry.output(stream) > 0: + chars_written += stream.write("\r\n\r\n") + + return chars_written + @property def known_entries_from_this_day(self): return self.__known_entries_for_this_date @@ -209,4 +250,4 @@ def date(self): """ :return: String in the format of YYYY-MM-DD that identifies this specific object of :class:`DatedEntryGroup`. """ - return str(self) + return self.uid diff --git a/src/dated_entry.py b/src/dated_entry.py index b487f92..c7da6ec 100644 --- a/src/dated_entry.py +++ b/src/dated_entry.py @@ -9,8 +9,11 @@ import logging import re +import io from typing import Match +import typing + from src import errors from src import utils from src.config import options @@ -104,7 +107,7 @@ def slice_quotes(string: str) -> str: :returns: string without quotation marks in the beginning and end of the initial string, even if it means empty str. """ if string is not None and len(string) > 2: - return string.strip("\"") + return string.strip("\"").strip() # only 2 characters? Then it is an empty cell. return "" @@ -212,8 +215,11 @@ def __init__(self, # --- # Process activities # --- + # TODO: I could make a decouple_and_sanitise() func to strip() and slice the string into a valid array self.__activities = [] - array = slice_quotes(activities).split(options.csv_delimiter) + # empty string "" is unfortunately still a valid element of array which makes it truthy + # I use list comprehension to discard such falsy values from the temporary array + array = [activity for activity in slice_quotes(activities).split(options.csv_delimiter) if activity] if len(array) > 0: for activity in array: self.__activities.append(utils.slugify( @@ -226,7 +232,7 @@ def __init__(self, # Process title # --- self.__title = None - if title is True and len(title) > 0: + if title: self.__title = slice_quotes(title) else: errors.ErrorMsgBase.print(ErrorMsg.WRONG_TITLE) @@ -234,11 +240,45 @@ def __init__(self, # Process note # --- self.__note = None - if note is True and len(note) > 0: + if note: self.__note = slice_quotes(note) else: errors.ErrorMsgBase.print(ErrorMsg.WRONG_NOTE) + def output(self, stream: io.IOBase | typing.IO) -> int: + """ + Write entry contents directly into the provided buffer stream. + It is the responsibility of the caller to handle the stream afterward. + :raises utils.StreamError: if the passed stream does not support writing to it. + :raises OSError: likely due to lack of space in memory or filesystem, depending on the stream + :param stream: Since it expects the base :class:`io.IOBase` class, it accepts both file and file-like streams. + :returns: how many characters were successfully written into the stream. + """ + if not stream.writable(): + raise utils.StreamError + + chars_written = 0 + # HEADER OF THE NOTE + # e.g. "## great | 11:00 AM | Oh my, what a night!" + # options.header is an int that multiplies the # to create headers in markdown + header_elements = [ + options.header * "#" + ' ' + self.__mood, + self.time, + self.__title + ] + header = ' | '.join([el for el in header_elements if el is not None]) + chars_written += stream.write(header) + # ACTIVITIES + # e.g. "bicycle skating pool swimming" + if len(self.__activities) > 0: + chars_written += stream.write('\r\n' + ' '.join(self.__activities)) + # NOTE + # e.g. "Went swimming this evening." + if self.__note is not None: + chars_written += stream.write('\r\n' + self.__note) + + return chars_written + @property def mood(self): return self.__mood @@ -254,3 +294,7 @@ def title(self): @property def note(self): return self.__note + + @property + def time(self): + return str(self.uid) diff --git a/src/librarian.py b/src/librarian.py index 4851d91..c9c4a04 100644 --- a/src/librarian.py +++ b/src/librarian.py @@ -15,7 +15,9 @@ import csv import json +import os import logging +from typing import IO from src import dated_entries_group from src import errors @@ -83,6 +85,15 @@ class InvalidDataInFileError(utils.CustomException): """The file does not follow the expected structure.""" +class NoDestinationSelectedError(utils.CustomException): + """You have not specified where to output the files when instantiating this Librarian object.""" + + +def create_and_open(filename: str, mode: str) -> IO: + os.makedirs(os.path.dirname(filename), exist_ok=True) + return open(filename, mode) + + # I've found a term that describes what this class does - it is a Director - even sounds similar to Librarian # https://refactoring.guru/design-patterns/builder class Librarian: @@ -108,7 +119,7 @@ class Librarian: def __init__(self, path_to_file: str, # the only crucial parameter at this stage - path_to_output: str = None, # TODO: `None` should block any outputting functions + path_to_output: str = None, path_to_moods: str = None): """ :param path_to_file: The path to the CSV file for processing. @@ -118,7 +129,7 @@ def __init__(self, :param path_to_moods: The path for a custom mood set file. """ self.__logger = logging.getLogger(self.__class__.__name__) - self.__known_dates = {} + self.__known_dates: dict[str, DatedEntriesGroup] = {} # Let's start processing the file # --- @@ -177,6 +188,7 @@ def __create_mood_set(self, json_file: str = None) -> 'Moodverse': # - Case 3: argument passed, it is valid = default mood-set expanded by the custom mood-set return Moodverse(custom_mood_set_from_file) + # TODO: should return a tuple of { lines_processed_correctly, all_lines_processed } def __process_file(self, filepath: str) -> bool: """ Validates CSV file and processes it into iterable rows. @@ -257,14 +269,14 @@ def __process_file(self, filepath: str) -> bool: self.__logger.critical(msg) raise InvalidDataInFileError(msg) - # Does it have any rows besides the header? - # If the file is empty or only has column headers, exit immediately - try: - next(raw_lines) - except StopIteration: - msg = ErrorMsg.print(ErrorMsg.FILE_EMPTY, filepath) - self.__logger.critical(msg) - raise InvalidDataInFileError(msg) + # # Does it have any rows besides the header? + # # If the file is empty or only has column headers, exit immediately + # try: + # next(raw_lines) + # except StopIteration: + # msg = ErrorMsg.print(ErrorMsg.FILE_EMPTY, filepath) + # self.__logger.critical(msg) + # raise InvalidDataInFileError(msg) # If the code has reached this point and has not exited, it means both file and contents have to be ok # Processing @@ -322,21 +334,31 @@ def access_date(self, target_date: str) -> DatedEntriesGroup: :return: reference to :class:`DatedEntriesGroup` object """ try: - this_date_group = dated_entries_group.DatedEntriesGroup(target_date, self.__mood_set) + date_lookup = dated_entries_group.Date(target_date) except dated_entries_group.InvalidDateError: raise ValueError - # have you already filed this date? - if this_date_group.date in self.__known_dates: - # yes - self.__logger.debug(ErrorMsg.print(ErrorMsg.OBJECT_FOUND, target_date)) + if str(date_lookup) in self.__known_dates: + return self.__known_dates[str(date_lookup)] else: - # no, add it to my dict - self.__logger.debug(ErrorMsg.print(ErrorMsg.OBJECT_NOT_FOUND, target_date)) - self.__known_dates[this_date_group.date] = this_date_group + new_obj = DatedEntriesGroup(str(date_lookup), self.__mood_set) + self.__known_dates[str(date_lookup)] = new_obj + return new_obj - # in any case - return this_date_group + def output_all(self): + """ + Loops through known dates and calls :class:`DatedEntriesGroup` to output its contents inside the destination. + :raises NoDestinationSelectedError: when the parent object has been instantiated without a destination set. + """ + if self.__destination is None: + raise NoDestinationSelectedError + + for known_date in self.__known_dates.values(): + # "2022/11/09/2022-11-09.md" + filename = str(known_date.date) + ".md" + filepath = "/".join([self.__destination, known_date.date.year, known_date.date.month, filename]) + with create_and_open(filepath, 'a') as file: + known_date.output(file) # Use a dunder overload of getitem to access groups in either way # 1. my_librarian["2022-10-10"] diff --git a/src/utils.py b/src/utils.py index 81d623c..1173715 100755 --- a/src/utils.py +++ b/src/utils.py @@ -36,22 +36,27 @@ def __init__(self, message=None): self.message = message +class StreamError(CustomException): + pass + + def slugify(text: str, taggify: bool): # noinspection SpellCheckingInspection """ Simple slugification function to transform text. Works on non-latin characters too. """ logger = logging.getLogger(__name__) - text = str(text).lower() + text = str(text).lower().strip() # get rid of trailing spaces left after splitting activities apart from one string text = re.sub(re.compile(r"\s+"), '-', text) # Replace spaces with - text = re.sub(re.compile(r"[^\w\-]+"), '', text) # Remove all non-word chars text = re.sub(re.compile(r"--+"), '-', text) # Replace multiple - with single - text = re.sub(re.compile(r"^-+"), '', text) # Trim - from start of text text = re.sub(re.compile(r"-+$"), '', text) # Trim - from end of text + # Checks if the tag is actually a valid tag in Obsidian - still appends the hash even if not, but warns at least if taggify: if re.match('[0-9]', text): logger.warning(ErrorMsg.print(ErrorMsg.INVALID_OBSIDIAN_TAGS, text)) - return text + return '#' + text if taggify else text def expand_path(path): From d74c144b73aa60a254788bd8c1f7aae04ff21779 Mon Sep 17 00:00:00 2001 From: DeutscheGabanna <thetobruk@duck.com> Date: Sun, 21 Jan 2024 13:32:37 +0100 Subject: [PATCH 20/40] reraise with traces of the previous error --- _tests/test_output.py | 2 +- src/dated_entries_group.py | 16 +++++++------- src/dated_entry.py | 4 ++-- src/entry/mood.py | 4 ++-- src/librarian.py | 44 +++++++++++++++++++------------------- 5 files changed, 35 insertions(+), 35 deletions(-) diff --git a/_tests/test_output.py b/_tests/test_output.py index 6675f03..b52d6c8 100644 --- a/_tests/test_output.py +++ b/_tests/test_output.py @@ -347,4 +347,4 @@ def tearDown(self) -> None: elif os.path.isdir(file_path): shutil.rmtree(file_path) except OSError as e: - print('Failed to delete %s while cleaning up after a test. Reason: %s' % (file_path, e)) + print(f"Failed to delete {file_path} while cleaning up after a test. Reason: {e}") diff --git a/src/dated_entries_group.py b/src/dated_entries_group.py index d04b0e1..60bd062 100644 --- a/src/dated_entries_group.py +++ b/src/dated_entries_group.py @@ -140,10 +140,10 @@ def __init__(self, date, current_mood_set: Moodverse = Moodverse()): try: super().__init__(Date(date)) # Date is no good? - except InvalidDateError: + except InvalidDateError as err: msg = ErrorMsg.print(ErrorMsg.WRONG_VALUE, date, "YYYY-MM-DD") self.__logger.warning(msg) - raise InvalidDateError(msg) + raise InvalidDateError(msg) from err # All good - initialise self.__known_entries_for_this_date: dict[str, DatedEntry] = {} @@ -166,8 +166,8 @@ def create_dated_entry_from_row(self, for key in ["time", "mood"]: try: line[key] - except KeyError: - raise IncompleteDataRow(key) + except KeyError as err: + raise IncompleteDataRow(key) from err # is it empty then, maybe? if not line[key]: raise IncompleteDataRow(key) @@ -187,8 +187,8 @@ def create_dated_entry_from_row(self, note=line["note"], override_mood_set=self.__known_moods ) - except ValueError: - raise ValueError + except ValueError as err: + raise ValueError from err self.append_to_known(this_entry) return this_entry @@ -202,10 +202,10 @@ def access_dated_entry(self, time: str) -> DatedEntry: """ try: ref = self.__known_entries_for_this_date[time] - except KeyError: + except KeyError as err: msg = ErrorMsg.print(ErrorMsg.OBJECT_NOT_FOUND, time) self.__logger.warning(msg) - raise DatedEntryMissingError(msg) + raise DatedEntryMissingError(msg) from err self.__logger.debug(ErrorMsg.print(ErrorMsg.OBJECT_FOUND, time)) return ref diff --git a/src/dated_entry.py b/src/dated_entry.py index c7da6ec..622c434 100644 --- a/src/dated_entry.py +++ b/src/dated_entry.py @@ -196,9 +196,9 @@ def __init__(self, # --- try: super().__init__(Time(time)) - except IsNotTimeError: + except IsNotTimeError as err: errors.ErrorMsgBase.print(ErrorMsg.WRONG_TIME, time) - raise ValueError + raise ValueError from err # --- # MOOD diff --git a/src/entry/mood.py b/src/entry/mood.py index 5200623..7ec319c 100644 --- a/src/entry/mood.py +++ b/src/entry/mood.py @@ -103,10 +103,10 @@ def __expand_moodset_with_customs(self, moods_to_process: dict[str, List[str]]) try: # Leap of faith - there must be a "rad" group, otherwise there's no point in continuing mood_group_to_transfer = moods_to_process[expected_mood_group] - except KeyError: + except KeyError as err: msg = ErrorMsg.print(ErrorMsg.MOOD_GROUP_NOT_FOUND, expected_mood_group) self.__logger.warning(msg) - raise MoodNotFoundError(msg) + raise MoodNotFoundError(msg) from err # Go through each mood in this mood group - e.g. rad - and transfer them in the form of Mood objects for mood in mood_group_to_transfer: self.__mood_set[expected_mood_group].create_mood(mood) diff --git a/src/librarian.py b/src/librarian.py index c9c4a04..a1a650c 100644 --- a/src/librarian.py +++ b/src/librarian.py @@ -138,16 +138,16 @@ def __init__(self, # P.S Why am I starting first with moods? Because process_file first checks if it has moods installed. try: self.__mood_set = self.__create_mood_set(path_to_moods) - except CannotAccessFileError: - raise CannotAccessCustomMoodsError + except CannotAccessFileError as err: + raise CannotAccessCustomMoodsError from err # 2. Access the CSV file and get all the rows with content # then pass the data to specialised data objects that can handle them in a structured way # TODO: Deal with files that are valid but at the end of parsing have zero lines successfully parsed try: self.__process_file(path_to_file) - except (CannotAccessFileError, InvalidDataInFileError): - raise CannotAccessJournalError + except (CannotAccessFileError, InvalidDataInFileError) as err: + raise CannotAccessJournalError from err # Ok, if no exceptions were raised so far, the file is good, let's go through the rest of the attributes self.__destination = path_to_output @@ -167,18 +167,18 @@ def __create_mood_set(self, json_file: str = None) -> 'Moodverse': try: with open(exp_path, encoding="UTF-8") as file: custom_mood_set_from_file = json.load(file) - except FileNotFoundError: + except FileNotFoundError as err: msg = ErrorMsg.print(ErrorMsg.FILE_MISSING, exp_path) self.__logger.warning(msg) - raise CannotAccessFileError(msg) - except PermissionError: + raise CannotAccessFileError(msg) from err + except PermissionError as err: msg = ErrorMsg.print(ErrorMsg.PERMISSION_ERROR, exp_path) self.__logger.warning(msg) - raise CannotAccessFileError(msg) - except json.JSONDecodeError: + raise CannotAccessFileError(msg) from err + except json.JSONDecodeError as err: msg = ErrorMsg.print(ErrorMsg.DECODE_ERROR, exp_path) self.__logger.warning(msg) - raise CannotAccessFileError(msg) + raise CannotAccessFileError(msg) from err else: custom_mood_set_from_file = None @@ -205,19 +205,19 @@ def __process_file(self, filepath: str) -> bool: try: file = open(filepath, newline='', encoding='UTF-8') # File has not been found - except FileNotFoundError: + except FileNotFoundError as err: msg = ErrorMsg.print(ErrorMsg.FILE_MISSING, filepath) self.__logger.critical(msg) - raise CannotAccessFileError(msg) + raise CannotAccessFileError(msg) from err # Insufficient permissions to access the file - except PermissionError: + except PermissionError as err: msg = ErrorMsg.print(ErrorMsg.PERMISSION_ERROR, filepath) self.__logger.critical(msg) - raise CannotAccessFileError(msg) + raise CannotAccessFileError(msg) from err # Other error that makes it impossible to access the file - except OSError: + except OSError as err: self.__logger.critical(OSError) - raise CannotAccessFileError + raise CannotAccessFileError from err # If the code reaches here, the program can access the file. # Now let's determine if the file's contents are actually usable @@ -228,10 +228,10 @@ def __process_file(self, filepath: str) -> bool: try: # strict parameter throws csv.Error if parsing fails raw_lines = csv.DictReader(file, delimiter=',', quotechar='"', strict=True) - except csv.Error: + except csv.Error as err: msg = ErrorMsg.print(ErrorMsg.DECODE_ERROR, filepath) self.__logger.critical(msg) - raise InvalidDataInFileError(msg) + raise InvalidDataInFileError(msg) from err # Does it have all the fields? Push any missing field into an array for later reference # Even if only one column from the list below is missing in the CSV, it's a problem while parsing later @@ -254,10 +254,10 @@ def __process_file(self, filepath: str) -> bool: missing_strings = [ expected_field for expected_field in expected_fields if expected_field not in raw_lines.fieldnames ] - except (csv.Error, UnicodeDecodeError): + except (csv.Error, UnicodeDecodeError) as err: msg = ErrorMsg.print(ErrorMsg.DECODE_ERROR, filepath) self.__logger.critical(msg) - raise InvalidDataInFileError(msg) + raise InvalidDataInFileError(msg) from err if not missing_strings: self.__logger.debug(ErrorMsg.print(ErrorMsg.CSV_ALL_FIELDS_PRESENT)) @@ -335,8 +335,8 @@ def access_date(self, target_date: str) -> DatedEntriesGroup: """ try: date_lookup = dated_entries_group.Date(target_date) - except dated_entries_group.InvalidDateError: - raise ValueError + except dated_entries_group.InvalidDateError as err: + raise ValueError from err if str(date_lookup) in self.__known_dates: return self.__known_dates[str(date_lookup)] From 0e16bc710381db6166d0eace746904f6faaa543d Mon Sep 17 00:00:00 2001 From: DeutscheGabanna <thetobruk@duck.com> Date: Sun, 21 Jan 2024 13:36:09 +0100 Subject: [PATCH 21/40] fixed encoding on Windows --- src/librarian.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/librarian.py b/src/librarian.py index a1a650c..ba7698a 100644 --- a/src/librarian.py +++ b/src/librarian.py @@ -91,7 +91,7 @@ class NoDestinationSelectedError(utils.CustomException): def create_and_open(filename: str, mode: str) -> IO: os.makedirs(os.path.dirname(filename), exist_ok=True) - return open(filename, mode) + return open(filename, mode, encoding="UTF-8") # I've found a term that describes what this class does - it is a Director - even sounds similar to Librarian From 3464b9d5c61a4f0794f6507bca4fae592a6574ed Mon Sep 17 00:00:00 2001 From: DeutscheGabanna <thetobruk@duck.com> Date: Sun, 21 Jan 2024 13:54:44 +0100 Subject: [PATCH 22/40] os.linesep instead of '\r\n' --- _tests/test_output.py | 56 +++++++++++++++++++------------------- src/dated_entries_group.py | 9 +++--- src/dated_entry.py | 5 ++-- 3 files changed, 36 insertions(+), 34 deletions(-) diff --git a/_tests/test_output.py b/_tests/test_output.py index b52d6c8..2417ebd 100644 --- a/_tests/test_output.py +++ b/_tests/test_output.py @@ -37,7 +37,7 @@ def test_bare_minimum_entry_content(self): # --- # Then create another stream and fill it with the same content, but written directly, not through object with io.StringIO() as compare_stream: - compare_stream.write("## great | 11:00\r\n") + compare_stream.write("## great | 11:00" + os.linesep) compare_stream.write("#bicycle #chess") # THEN @@ -66,7 +66,7 @@ def test_entry_with_title_no_note(self): # --- # Then create another stream and fill it with the same content, but written directly, not through object with io.StringIO() as compare_stream: - compare_stream.write("## great | 11:00 | I'm super pumped!\r\n") + compare_stream.write("## great | 11:00 | I'm super pumped!" + os.linesep) compare_stream.write("#bicycle #chess") # THEN @@ -96,8 +96,8 @@ def test_entry_with_title_and_note(self): # --- # Then create another stream and fill it with the same content, but written directly, not through object with io.StringIO() as compare_stream: - compare_stream.write("## great | 11:00 | I'm super pumped!\r\n") - compare_stream.write("#bicycle #chess\r\n") + compare_stream.write("## great | 11:00 | I'm super pumped!" + os.linesep) + compare_stream.write("#bicycle #chess" + os.linesep) compare_stream.write("I believe I can fly, I believe I can touch the sky.") # THEN @@ -124,7 +124,7 @@ def test_entry_with_hashtagged_activities(self): # --- # Then create another stream and fill it with the same content, but written directly, not through object with io.StringIO() as compare_stream: - compare_stream.write("## great | 11:00\r\n") + compare_stream.write("## great | 11:00" + os.linesep) compare_stream.write("#bicycle #chess") # THEN @@ -143,7 +143,7 @@ def test_entry_with_hashtagged_activities(self): # --- # Then create another stream and fill it with the same content, but written directly, not through object with io.StringIO() as compare_stream: - compare_stream.write("## great | 11:00\r\n") + compare_stream.write("## great | 11:00" + os.linesep) compare_stream.write("bicycle chess") # THEN @@ -171,11 +171,11 @@ def test_outputting_day_with_one_entry(self): # --- # Then create another stream and fill it with the same content, but written directly, not through object with io.StringIO() as compare_stream: - compare_stream.write("---\r\n") - compare_stream.write("tags: daily\r\n") - compare_stream.write("---\r\n\r\n") + compare_stream.write("---" + os.linesep) + compare_stream.write("tags: daily" + os.linesep) + compare_stream.write("---" + os.linesep*2) - compare_stream.write("## vaguely ok | 10:00 AM\r\n\r\n") + compare_stream.write("## vaguely ok | 10:00 AM" + os.linesep*2) # THEN # --- @@ -207,15 +207,15 @@ def test_outputting_day_with_two_entries(self): # --- # Then create another stream and fill it with the same content, but written directly, not through object with io.StringIO() as compare_stream: - compare_stream.write("---\r\n") - compare_stream.write("tags: daily\r\n") - compare_stream.write("---\r\n\r\n") + compare_stream.write("---" + os.linesep) + compare_stream.write("tags: daily" + os.linesep) + compare_stream.write("---" + os.linesep*2) - compare_stream.write("## vaguely ok | 10:00 AM\r\n") - compare_stream.write("#bowling\r\n") - compare_stream.write("Feeling kinda ok.\r\n\r\n") + compare_stream.write("## vaguely ok | 10:00 AM" + os.linesep) + compare_stream.write("#bowling" + os.linesep) + compare_stream.write("Feeling kinda ok." + os.linesep*2) - compare_stream.write("## awful | 9:30 PM | Everything is going downhill for me\r\n\r\n") + compare_stream.write("## awful | 9:30 PM | Everything is going downhill for me" + os.linesep*2) # THEN # --- @@ -251,11 +251,11 @@ def test_outputting_day_with_two_entries_and_invalid_filetags(self): # --- # Then create another stream and fill it with the same content, but written directly, not through object with io.StringIO() as compare_stream: - compare_stream.write("## vaguely ok | 10:00 AM\r\n") - compare_stream.write("#bowling\r\n") - compare_stream.write("Feeling kinda meh.\r\n\r\n") + compare_stream.write("## vaguely ok | 10:00 AM" + os.linesep) + compare_stream.write("#bowling" + os.linesep) + compare_stream.write("Feeling kinda meh." + os.linesep*2) - compare_stream.write("## awful | 9:30 PM | Everything is going downhill for me\r\n\r\n") + compare_stream.write("## awful | 9:30 PM | Everything is going downhill for me" + os.linesep*2) # THEN # --- @@ -291,15 +291,15 @@ def test_outputting_day_with_two_entries_and_partially_valid_filetags(self): # --- # Then create another stream and fill it with the same content, but written directly, not through object with io.StringIO() as compare_stream: - compare_stream.write("---\r\n") - compare_stream.write("tags: bar,foo\r\n") - compare_stream.write("---\r\n\r\n") + compare_stream.write("---" + os.linesep) + compare_stream.write("tags: bar,foo" + os.linesep) + compare_stream.write("---" + os.linesep*2) - compare_stream.write("## vaguely ok | 10:00 AM\r\n") - compare_stream.write("#bowling\r\n") - compare_stream.write("Feeling fine, I guess.\r\n\r\n") + compare_stream.write("## vaguely ok | 10:00 AM" + os.linesep) + compare_stream.write("#bowling" + os.linesep) + compare_stream.write("Feeling fine, I guess." + os.linesep*2) - compare_stream.write("## awful | 9:30 PM | Everything is going downhill for me\r\n\r\n") + compare_stream.write("## awful | 9:30 PM | Everything is going downhill for me" + os.linesep*2) # THEN # --- diff --git a/src/dated_entries_group.py b/src/dated_entries_group.py index 60bd062..e3fa969 100644 --- a/src/dated_entries_group.py +++ b/src/dated_entries_group.py @@ -10,6 +10,7 @@ import io import logging import re +import os # used for linesep only import typing @@ -227,9 +228,9 @@ def output(self, stream: io.IOBase | typing.IO) -> int: # sorted() is used to have a deterministic order, set() was random, so I couldn't properly test the output valid_tags = sorted(set(val for val in options.tags if val)) if valid_tags: - chars_written += stream.write("---\r\n") - chars_written += stream.write("tags: " + ",".join(valid_tags) + "\r\n") - chars_written += stream.write("---\r\n\r\n") + chars_written += stream.write("---" + os.linesep) + chars_written += stream.write("tags: " + ",".join(valid_tags) + os.linesep) + chars_written += stream.write("---" + os.linesep*2) # THE ACTUAL ENTRY CONTENTS # Each DatedEntry object now appends its contents into the stream @@ -237,7 +238,7 @@ def output(self, stream: io.IOBase | typing.IO) -> int: # write returns the number of characters successfully written # https://docs.python.org/3/library/io.html#io.TextIOBase.write if entry.output(stream) > 0: - chars_written += stream.write("\r\n\r\n") + chars_written += stream.write(os.linesep*2) return chars_written diff --git a/src/dated_entry.py b/src/dated_entry.py index 622c434..5d814fa 100644 --- a/src/dated_entry.py +++ b/src/dated_entry.py @@ -8,6 +8,7 @@ from __future__ import annotations import logging +import os # used only for linesep import re import io from typing import Match @@ -271,11 +272,11 @@ def output(self, stream: io.IOBase | typing.IO) -> int: # ACTIVITIES # e.g. "bicycle skating pool swimming" if len(self.__activities) > 0: - chars_written += stream.write('\r\n' + ' '.join(self.__activities)) + chars_written += stream.write(os.linesep + ' '.join(self.__activities)) # NOTE # e.g. "Went swimming this evening." if self.__note is not None: - chars_written += stream.write('\r\n' + self.__note) + chars_written += stream.write(os.linesep + self.__note) return chars_written From 6b31b29d50e3a8b94453d1dd46881d0e6eca956e Mon Sep 17 00:00:00 2001 From: DeutscheGabanna <thetobruk@duck.com> Date: Sun, 21 Jan 2024 14:07:21 +0100 Subject: [PATCH 23/40] added autocrlf support in .gitattributes Signed-off-by: DeutscheGabanna <thetobruk@duck.com> --- .gitattributes | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 .gitattributes diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000..09bc62c --- /dev/null +++ b/.gitattributes @@ -0,0 +1,2 @@ +# Set the default behavior, in case people don't have core.autocrlf set. +* text=auto \ No newline at end of file From 5e9f058c6df2adc4c9c8e62f21fe52e3d99d3e0b Mon Sep 17 00:00:00 2001 From: DeutscheGabanna <thetobruk@duck.com> Date: Sun, 21 Jan 2024 14:28:54 +0100 Subject: [PATCH 24/40] changed universal newline mode Signed-off-by: DeutscheGabanna <thetobruk@duck.com> --- _tests/test_output.py | 12 ++++++------ src/librarian.py | 2 +- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/_tests/test_output.py b/_tests/test_output.py index 2417ebd..05928db 100644 --- a/_tests/test_output.py +++ b/_tests/test_output.py @@ -321,16 +321,16 @@ def test_directory_loop(self): lib = Librarian("_tests/sheet-1-valid-data.csv", path_to_output="_tests/output-results") lib.output_all() - with open("_tests/output-results/2022/10/2022-10-25.md", encoding="UTF-8") as parsed_result: - with open("_tests/expected_results/2022-10-25.md", encoding="UTF-8") as expected_result: + with open("_tests/output-results/2022/10/2022-10-25.md", encoding="UTF-8", newline='') as parsed_result: + with open("_tests/expected_results/2022-10-25.md", encoding="UTF-8", newline='') as expected_result: self.assertListEqual(parsed_result.readlines(), expected_result.readlines()) - with open("_tests/output-results/2022/10/2022-10-26.md", encoding="UTF-8") as parsed_result: - with open("_tests/expected_results/2022-10-26.md", encoding="UTF-8") as expected_result: + with open("_tests/output-results/2022/10/2022-10-26.md", encoding="UTF-8", newline='') as parsed_result: + with open("_tests/expected_results/2022-10-26.md", encoding="UTF-8", newline='') as expected_result: self.assertListEqual(parsed_result.readlines(), expected_result.readlines()) - with open("_tests/output-results/2022/10/2022-10-27.md", encoding="UTF-8") as parsed_result: - with open("_tests/expected_results/2022-10-27.md", encoding="UTF-8") as expected_result: + with open("_tests/output-results/2022/10/2022-10-27.md", encoding="UTF-8", newline='') as parsed_result: + with open("_tests/expected_results/2022-10-27.md", encoding="UTF-8", newline='') as expected_result: self.assertListEqual(parsed_result.readlines(), expected_result.readlines()) with open("_tests/output-results/2022/10/2022-10-30.md", encoding="UTF-8") as parsed_result: diff --git a/src/librarian.py b/src/librarian.py index ba7698a..dedee9e 100644 --- a/src/librarian.py +++ b/src/librarian.py @@ -91,7 +91,7 @@ class NoDestinationSelectedError(utils.CustomException): def create_and_open(filename: str, mode: str) -> IO: os.makedirs(os.path.dirname(filename), exist_ok=True) - return open(filename, mode, encoding="UTF-8") + return open(filename, mode, encoding="UTF-8", newline='') # I've found a term that describes what this class does - it is a Director - even sounds similar to Librarian From 9bb3d82eb3980870a4eef09ec54e0cddaa64bf3d Mon Sep 17 00:00:00 2001 From: DeutscheGabanna <thetobruk@duck.com> Date: Mon, 22 Jan 2024 17:52:38 +0100 Subject: [PATCH 25/40] reverts: os.linesep but now '\n' instead of '\r\n' to hopefully avoid double spaces on windows Signed-off-by: DeutscheGabanna <thetobruk@duck.com> --- src/dated_entries_group.py | 12 ++++++++---- src/dated_entry.py | 4 ++-- 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/src/dated_entries_group.py b/src/dated_entries_group.py index e3fa969..c42e622 100644 --- a/src/dated_entries_group.py +++ b/src/dated_entries_group.py @@ -228,9 +228,13 @@ def output(self, stream: io.IOBase | typing.IO) -> int: # sorted() is used to have a deterministic order, set() was random, so I couldn't properly test the output valid_tags = sorted(set(val for val in options.tags if val)) if valid_tags: - chars_written += stream.write("---" + os.linesep) - chars_written += stream.write("tags: " + ",".join(valid_tags) + os.linesep) - chars_written += stream.write("---" + os.linesep*2) + # why '\n' instead of os.linesep? + # > Do not use os.linesep as a line terminator when writing files opened in text mode (the default); + # > use a single '\n' instead, on all platforms. + # https://docs.python.org/3.10/library/os.html#os.linesep + chars_written += stream.write("---" + "\n") + chars_written += stream.write("tags: " + ",".join(valid_tags) + "\n") + chars_written += stream.write("---" + "\n"*2) # THE ACTUAL ENTRY CONTENTS # Each DatedEntry object now appends its contents into the stream @@ -238,7 +242,7 @@ def output(self, stream: io.IOBase | typing.IO) -> int: # write returns the number of characters successfully written # https://docs.python.org/3/library/io.html#io.TextIOBase.write if entry.output(stream) > 0: - chars_written += stream.write(os.linesep*2) + chars_written += stream.write("\n"*2) return chars_written diff --git a/src/dated_entry.py b/src/dated_entry.py index 5d814fa..4bd20e6 100644 --- a/src/dated_entry.py +++ b/src/dated_entry.py @@ -272,11 +272,11 @@ def output(self, stream: io.IOBase | typing.IO) -> int: # ACTIVITIES # e.g. "bicycle skating pool swimming" if len(self.__activities) > 0: - chars_written += stream.write(os.linesep + ' '.join(self.__activities)) + chars_written += stream.write("\n" + ' '.join(self.__activities)) # NOTE # e.g. "Went swimming this evening." if self.__note is not None: - chars_written += stream.write(os.linesep + self.__note) + chars_written += stream.write("\n" + self.__note) return chars_written From fc43242bc33ba1c1bade21626c53b17629115c9c Mon Sep 17 00:00:00 2001 From: DeutscheGabanna <thetobruk@duck.com> Date: Mon, 22 Jan 2024 17:55:59 +0100 Subject: [PATCH 26/40] reverts: os.linesep but now '\n' instead of '\r\n' to hopefully avoid double spaces on windows Signed-off-by: DeutscheGabanna <thetobruk@duck.com> --- _tests/test_output.py | 56 +++++++++++++++++++++---------------------- 1 file changed, 28 insertions(+), 28 deletions(-) diff --git a/_tests/test_output.py b/_tests/test_output.py index 05928db..8d9c109 100644 --- a/_tests/test_output.py +++ b/_tests/test_output.py @@ -37,7 +37,7 @@ def test_bare_minimum_entry_content(self): # --- # Then create another stream and fill it with the same content, but written directly, not through object with io.StringIO() as compare_stream: - compare_stream.write("## great | 11:00" + os.linesep) + compare_stream.write("## great | 11:00" + "\n") compare_stream.write("#bicycle #chess") # THEN @@ -66,7 +66,7 @@ def test_entry_with_title_no_note(self): # --- # Then create another stream and fill it with the same content, but written directly, not through object with io.StringIO() as compare_stream: - compare_stream.write("## great | 11:00 | I'm super pumped!" + os.linesep) + compare_stream.write("## great | 11:00 | I'm super pumped!" + "\n") compare_stream.write("#bicycle #chess") # THEN @@ -96,8 +96,8 @@ def test_entry_with_title_and_note(self): # --- # Then create another stream and fill it with the same content, but written directly, not through object with io.StringIO() as compare_stream: - compare_stream.write("## great | 11:00 | I'm super pumped!" + os.linesep) - compare_stream.write("#bicycle #chess" + os.linesep) + compare_stream.write("## great | 11:00 | I'm super pumped!" + "\n") + compare_stream.write("#bicycle #chess" + "\n") compare_stream.write("I believe I can fly, I believe I can touch the sky.") # THEN @@ -124,7 +124,7 @@ def test_entry_with_hashtagged_activities(self): # --- # Then create another stream and fill it with the same content, but written directly, not through object with io.StringIO() as compare_stream: - compare_stream.write("## great | 11:00" + os.linesep) + compare_stream.write("## great | 11:00" + "\n") compare_stream.write("#bicycle #chess") # THEN @@ -143,7 +143,7 @@ def test_entry_with_hashtagged_activities(self): # --- # Then create another stream and fill it with the same content, but written directly, not through object with io.StringIO() as compare_stream: - compare_stream.write("## great | 11:00" + os.linesep) + compare_stream.write("## great | 11:00" + "\n") compare_stream.write("bicycle chess") # THEN @@ -171,11 +171,11 @@ def test_outputting_day_with_one_entry(self): # --- # Then create another stream and fill it with the same content, but written directly, not through object with io.StringIO() as compare_stream: - compare_stream.write("---" + os.linesep) - compare_stream.write("tags: daily" + os.linesep) - compare_stream.write("---" + os.linesep*2) + compare_stream.write("---" + "\n") + compare_stream.write("tags: daily" + "\n") + compare_stream.write("---" + "\n"*2) - compare_stream.write("## vaguely ok | 10:00 AM" + os.linesep*2) + compare_stream.write("## vaguely ok | 10:00 AM" + "\n"*2) # THEN # --- @@ -207,15 +207,15 @@ def test_outputting_day_with_two_entries(self): # --- # Then create another stream and fill it with the same content, but written directly, not through object with io.StringIO() as compare_stream: - compare_stream.write("---" + os.linesep) - compare_stream.write("tags: daily" + os.linesep) - compare_stream.write("---" + os.linesep*2) + compare_stream.write("---" + "\n") + compare_stream.write("tags: daily" + "\n") + compare_stream.write("---" + "\n"*2) - compare_stream.write("## vaguely ok | 10:00 AM" + os.linesep) - compare_stream.write("#bowling" + os.linesep) - compare_stream.write("Feeling kinda ok." + os.linesep*2) + compare_stream.write("## vaguely ok | 10:00 AM" + "\n") + compare_stream.write("#bowling" + "\n") + compare_stream.write("Feeling kinda ok." + "\n"*2) - compare_stream.write("## awful | 9:30 PM | Everything is going downhill for me" + os.linesep*2) + compare_stream.write("## awful | 9:30 PM | Everything is going downhill for me" + "\n"*2) # THEN # --- @@ -251,11 +251,11 @@ def test_outputting_day_with_two_entries_and_invalid_filetags(self): # --- # Then create another stream and fill it with the same content, but written directly, not through object with io.StringIO() as compare_stream: - compare_stream.write("## vaguely ok | 10:00 AM" + os.linesep) - compare_stream.write("#bowling" + os.linesep) - compare_stream.write("Feeling kinda meh." + os.linesep*2) + compare_stream.write("## vaguely ok | 10:00 AM" + "\n") + compare_stream.write("#bowling" + "\n") + compare_stream.write("Feeling kinda meh." + "\n"*2) - compare_stream.write("## awful | 9:30 PM | Everything is going downhill for me" + os.linesep*2) + compare_stream.write("## awful | 9:30 PM | Everything is going downhill for me" + "\n"*2) # THEN # --- @@ -291,15 +291,15 @@ def test_outputting_day_with_two_entries_and_partially_valid_filetags(self): # --- # Then create another stream and fill it with the same content, but written directly, not through object with io.StringIO() as compare_stream: - compare_stream.write("---" + os.linesep) - compare_stream.write("tags: bar,foo" + os.linesep) - compare_stream.write("---" + os.linesep*2) + compare_stream.write("---" + "\n") + compare_stream.write("tags: bar,foo" + "\n") + compare_stream.write("---" + "\n"*2) - compare_stream.write("## vaguely ok | 10:00 AM" + os.linesep) - compare_stream.write("#bowling" + os.linesep) - compare_stream.write("Feeling fine, I guess." + os.linesep*2) + compare_stream.write("## vaguely ok | 10:00 AM" + "\n") + compare_stream.write("#bowling" + "\n") + compare_stream.write("Feeling fine, I guess." + "\n"*2) - compare_stream.write("## awful | 9:30 PM | Everything is going downhill for me" + os.linesep*2) + compare_stream.write("## awful | 9:30 PM | Everything is going downhill for me" + "\n"*2) # THEN # --- From aaec9950e398b373da55615fe653b7f4fbafc378 Mon Sep 17 00:00:00 2001 From: DeutscheGabanna <thetobruk@duck.com> Date: Mon, 22 Jan 2024 18:01:39 +0100 Subject: [PATCH 27/40] delete newline='' from open() calls as it disables universal newline mode Signed-off-by: DeutscheGabanna <thetobruk@duck.com> --- _tests/test_output.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/_tests/test_output.py b/_tests/test_output.py index 8d9c109..0642c0b 100644 --- a/_tests/test_output.py +++ b/_tests/test_output.py @@ -321,16 +321,16 @@ def test_directory_loop(self): lib = Librarian("_tests/sheet-1-valid-data.csv", path_to_output="_tests/output-results") lib.output_all() - with open("_tests/output-results/2022/10/2022-10-25.md", encoding="UTF-8", newline='') as parsed_result: - with open("_tests/expected_results/2022-10-25.md", encoding="UTF-8", newline='') as expected_result: + with open("_tests/output-results/2022/10/2022-10-25.md", encoding="UTF-8") as parsed_result: + with open("_tests/expected_results/2022-10-25.md", encoding="UTF-8") as expected_result: self.assertListEqual(parsed_result.readlines(), expected_result.readlines()) - with open("_tests/output-results/2022/10/2022-10-26.md", encoding="UTF-8", newline='') as parsed_result: - with open("_tests/expected_results/2022-10-26.md", encoding="UTF-8", newline='') as expected_result: + with open("_tests/output-results/2022/10/2022-10-26.md", encoding="UTF-8") as parsed_result: + with open("_tests/expected_results/2022-10-26.md", encoding="UTF-8") as expected_result: self.assertListEqual(parsed_result.readlines(), expected_result.readlines()) - with open("_tests/output-results/2022/10/2022-10-27.md", encoding="UTF-8", newline='') as parsed_result: - with open("_tests/expected_results/2022-10-27.md", encoding="UTF-8", newline='') as expected_result: + with open("_tests/output-results/2022/10/2022-10-27.md", encoding="UTF-8") as parsed_result: + with open("_tests/expected_results/2022-10-27.md", encoding="UTF-8") as expected_result: self.assertListEqual(parsed_result.readlines(), expected_result.readlines()) with open("_tests/output-results/2022/10/2022-10-30.md", encoding="UTF-8") as parsed_result: From 36b9a880f3e78a4459665bb11dbdd42d71c1519f Mon Sep 17 00:00:00 2001 From: DeutscheGabanna <thetobruk@duck.com> Date: Mon, 22 Jan 2024 18:02:04 +0100 Subject: [PATCH 28/40] delete newline='' from open() calls as it disables universal newline mode Signed-off-by: DeutscheGabanna <thetobruk@duck.com> --- src/librarian.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/librarian.py b/src/librarian.py index dedee9e..a10146b 100644 --- a/src/librarian.py +++ b/src/librarian.py @@ -91,7 +91,7 @@ class NoDestinationSelectedError(utils.CustomException): def create_and_open(filename: str, mode: str) -> IO: os.makedirs(os.path.dirname(filename), exist_ok=True) - return open(filename, mode, encoding="UTF-8", newline='') + return open(filename, mode, encoding="UTF-8") # I've found a term that describes what this class does - it is a Director - even sounds similar to Librarian @@ -203,7 +203,7 @@ def __process_file(self, filepath: str) -> bool: # Let's determine if the file can be opened # --- try: - file = open(filepath, newline='', encoding='UTF-8') + file = open(filepath, encoding='UTF-8') # File has not been found except FileNotFoundError as err: msg = ErrorMsg.print(ErrorMsg.FILE_MISSING, filepath) From 7651a2fb3dc1d230b72008191464a237f49a6d42 Mon Sep 17 00:00:00 2001 From: DeutscheGabanna <thetobruk@duck.com> Date: Mon, 22 Jan 2024 18:19:33 +0100 Subject: [PATCH 29/40] added BOM UTF-8 to problematic file Signed-off-by: DeutscheGabanna <thetobruk@duck.com> --- _tests/expected_results/2022-10-30.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/_tests/expected_results/2022-10-30.md b/_tests/expected_results/2022-10-30.md index 3c88ad3..73becc3 100755 --- a/_tests/expected_results/2022-10-30.md +++ b/_tests/expected_results/2022-10-30.md @@ -1,4 +1,4 @@ ---- +--- tags: daily --- From 1dd06c3d87135952e4a3bbbdb152352de5266b1a Mon Sep 17 00:00:00 2001 From: DeutscheGabanna <thetobruk@duck.com> Date: Mon, 22 Jan 2024 18:20:37 +0100 Subject: [PATCH 30/40] This reverts commit 7651a2fb3dc1d230b72008191464a237f49a6d42. Signed-off-by: DeutscheGabanna <thetobruk@duck.com> --- _tests/expected_results/2022-10-30.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/_tests/expected_results/2022-10-30.md b/_tests/expected_results/2022-10-30.md index 73becc3..3c88ad3 100755 --- a/_tests/expected_results/2022-10-30.md +++ b/_tests/expected_results/2022-10-30.md @@ -1,4 +1,4 @@ ---- +--- tags: daily --- From f455b81ca7e3242678c5d472b32854a7b24246fd Mon Sep 17 00:00:00 2001 From: DeutscheGabanna <thetobruk@duck.com> Date: Wed, 24 Jan 2024 18:20:52 +0100 Subject: [PATCH 31/40] switched expected and actual results in assertions Signed-off-by: DeutscheGabanna <thetobruk@duck.com> --- _tests/test_config.py | 26 +++++++-------- _tests/test_dated_entries_group.py | 14 ++++---- _tests/test_dated_entry.py | 12 +++---- _tests/test_mood.py | 52 +++++++++++++++--------------- _tests/test_output.py | 26 +++++++-------- _tests/test_utils.py | 8 ++--- 6 files changed, 69 insertions(+), 69 deletions(-) diff --git a/_tests/test_config.py b/_tests/test_config.py index e2ae8aa..6dd3d18 100644 --- a/_tests/test_config.py +++ b/_tests/test_config.py @@ -20,12 +20,12 @@ def test_spoofed_keyword_option_without_equality_sign(self): # User input not in the dictionary of allowed options - should fail with self.assertRaises(SystemExit) as cm: options_to_check.parse_console(["--force", "yo-mama"]) - self.assertEqual(cm.exception.code, 2, msg="Invalid arguments were passed to argparse so it should exit with 2") + self.assertEqual(2, cm.exception.code, msg="Invalid arguments were passed to argparse so it should exit with 2") # User input provided both options - should fail with self.assertRaises(SystemExit) as cm: options_to_check.parse_console(["--force", "refuse", "accept"]) - self.assertEqual(cm.exception.code, 2, msg="Cannot both force-refuse and force-accept - should exit with 2") + self.assertEqual(2, cm.exception.code, msg="Cannot both force-refuse and force-accept - should exit with 2") # User input correct - should pass options_to_check.parse_console(["--force", "refuse"]) @@ -47,20 +47,20 @@ def test_spoofed_keyword_option_with_equality_sign(self): # User input not in the dictionary of allowed options - should fail with self.assertRaises(SystemExit) as cm: options_to_check.parse_console(["--force=yo-mama"]) - self.assertEqual(cm.exception.code, 2, msg="Invalid arguments were passed to argparse so it should exit with 2") + self.assertEqual(2, cm.exception.code, msg="Invalid arguments were passed to argparse so it should exit with 2") # User input provided both options - should fail with self.assertRaises(SystemExit) as cm: options_to_check.parse_console(["--force=refuse --force=accept"]) - self.assertEqual(cm.exception.code, 2, msg="Cannot both force-refuse and force-accept - should exit with 2") + self.assertEqual(2, cm.exception.code, msg="Cannot both force-refuse and force-accept - should exit with 2") # User input correct - should pass options_to_check.parse_console(["--force=refuse"]) - self.assertEqual(options_to_check.force, "refuse") + self.assertEqual("refuse", options_to_check.force) # User input correct - should pass options_to_check.parse_console(["--force=accept"]) - self.assertEqual(options_to_check.force, "accept") + self.assertEqual("accept", options_to_check.force) def test_check_if_required_arguments_passed(self): # Setup @@ -78,11 +78,11 @@ def test_check_if_required_arguments_passed(self): with self.assertRaises(SystemExit) as cm: options_to_check.parse_console(["--optional_arg", "haha"]) print(options_to_check) - self.assertEqual(cm.exception.code, 2, msg="No filepath provided - should exit with 2") + self.assertEqual(2, cm.exception.code, msg="No filepath provided - should exit with 2") # User input correct - should pass options_to_check.parse_console(["wakanda forever"]) - self.assertEqual(options_to_check.filepath, "wakanda forever") + self.assertEqual("wakanda forever", options_to_check.filepath) def test_expected_failure_empty_argument_array(self): # Setup @@ -95,7 +95,7 @@ def test_expected_failure_empty_argument_array(self): # User provided no arguments whatsoever - should fail with self.assertRaises(SystemExit) as cm: options_to_check.parse_console([]) - self.assertEqual(cm.exception.code, 2, msg="No arguments provided to argparse so it should exit with 2") + self.assertEqual(2, cm.exception.code, msg="No arguments provided to argparse so it should exit with 2") # TODO: test Namespace=self where SettingsManager overwrites its default attributes with argparse def test_if_settings_manager_overwrites_its_properties_from_console(self): @@ -124,8 +124,8 @@ def test_if_settings_manager_overwrites_its_properties_from_console(self): # User input correct - should pass options_to_check.parse_console(["this is NOT the default value", "hello", "world"]) - self.assertEqual(options_to_check.filepath, "this is NOT the default value") - self.assertNotEqual(options_to_check.filepath, "this is the default value") + self.assertEqual("this is NOT the default value", options_to_check.filepath) + self.assertNotEqual("this is the default value", options_to_check.filepath) # because neither "foo" nor "bar" is part of the SettingsManager class, I need to access it like a key in dict - self.assertEqual(vars(options_to_check)["foo"], "hello") - self.assertEqual(vars(options_to_check)["bar"], "world") + self.assertEqual("hello", vars(options_to_check)["foo"]) + self.assertEqual("world", vars(options_to_check)["bar"]) diff --git a/_tests/test_dated_entries_group.py b/_tests/test_dated_entries_group.py index e29767d..7c46d1e 100644 --- a/_tests/test_dated_entries_group.py +++ b/_tests/test_dated_entries_group.py @@ -74,9 +74,9 @@ def test_create_dated_entries_groups(self): """ Try to instantiate an object of :class:`DatedEntriesGroup` with either valid or invalid dates """ - self.assertEqual(str(DatedEntriesGroup("2023-10-15")), "2023-10-15") - self.assertEqual(str(DatedEntriesGroup("2019-5-9")), "2019-5-9") - self.assertEqual(str(DatedEntriesGroup("2023-11-25")), "2023-11-25") + self.assertEqual("2023-10-15", str(DatedEntriesGroup("2023-10-15"))) + self.assertEqual("2019-5-9", str(DatedEntriesGroup("2019-5-9"))) + self.assertEqual("2023-11-25", str(DatedEntriesGroup("2023-11-25"))) self.assertRaises(InvalidDateError, DatedEntriesGroup, "00-") self.assertRaises(InvalidDateError, DatedEntriesGroup, "2199-32-32") @@ -113,8 +113,8 @@ def test_access_dated_entry(self): Tries to either access existing entries through :func:`access_dated_entry` or missing ones. Expected behaviour is for the :class:`DatedEntryGroup` to return the entry object if exists or raise exception. """ - self.assertEqual(str(self.sample_date.access_dated_entry("10:00 AM")), "10:00 AM") - self.assertEqual(str(self.sample_date.access_dated_entry("9:30 PM")), "9:30 PM") + self.assertEqual("10:00 AM", str(self.sample_date.access_dated_entry("10:00 AM"))) + self.assertEqual("9:30 PM", str(self.sample_date.access_dated_entry("9:30 PM"))) # Test cases for 12-hour format self.assertRaises(DatedEntryMissingError, self.sample_date.access_dated_entry, "2: AM") # <- no minutes @@ -146,8 +146,8 @@ def test_get_known_dated_entries(self): - former will raise ValueError if time is invalid - latter will raise KeyError if time is invalid """ - self.assertEqual(str(self.sample_date.known_entries_from_this_day["9:30 PM"]), "9:30 PM") - self.assertEqual(str(self.sample_date.known_entries_from_this_day["10:00 AM"]), "10:00 AM") + self.assertEqual("9:30 PM", str(self.sample_date.known_entries_from_this_day["9:30 PM"])) + self.assertEqual("10:00 AM", str(self.sample_date.known_entries_from_this_day["10:00 AM"])) self.assertRaises(KeyError, lambda: self.sample_date.known_entries_from_this_day["23:00"]) self.assertRaises(KeyError, lambda: self.sample_date.known_entries_from_this_day["11:50 AM"]) diff --git a/_tests/test_dated_entry.py b/_tests/test_dated_entry.py index 8607b81..3dbe82e 100644 --- a/_tests/test_dated_entry.py +++ b/_tests/test_dated_entry.py @@ -12,9 +12,9 @@ class TestDatedEntryUtils(TestCase): def test_slice_quotes(self): # TODO: Flip values in assertions, because unittest's 'first' param is expected, 'second' is actual. - self.assertEqual(slice_quotes("\"test\""), "test") - self.assertEqual(slice_quotes("\"\""), "") - self.assertEqual(slice_quotes("\" bicycle \""), "bicycle") + self.assertEqual("test", slice_quotes("\"test\"")) + self.assertEqual("", slice_quotes("\"\"")) + self.assertEqual("bicycle", slice_quotes("\" bicycle \"")) class TestTime(TestCase): @@ -53,11 +53,11 @@ def test_bare_minimum_dated_entries(self): ) # Then - self.assertTrue(bare_minimum_dated_entry.mood, "vaguely ok") - self.assertTrue(bare_minimum_dated_entry.uid, "1:49 AM") + self.assertTrue("vaguely ok", bare_minimum_dated_entry.mood) + self.assertTrue("1:49 AM", bare_minimum_dated_entry.uid) self.assertIsNone(bare_minimum_dated_entry.title) self.assertIsNone(bare_minimum_dated_entry.note) - self.assertListEqual(bare_minimum_dated_entry.activities, []) + self.assertListEqual([], bare_minimum_dated_entry.activities) def test_insufficient_dated_entries(self): self.assertRaises(ValueError, DatedEntry, time="2:00", mood="") diff --git a/_tests/test_mood.py b/_tests/test_mood.py index 1e28aac..01f00f7 100644 --- a/_tests/test_mood.py +++ b/_tests/test_mood.py @@ -16,24 +16,24 @@ def test_default_moodverse_no_customisation(self): self.assertTrue(isinstance(my_default_moodverse["bad"], MoodGroup)) self.assertTrue(isinstance(my_default_moodverse["awful"], MoodGroup)) - self.assertEqual(my_default_moodverse["rad"], ["rad"]) - self.assertEqual(my_default_moodverse["good"], ["good"]) - self.assertEqual(my_default_moodverse["neutral"], ["neutral"]) - self.assertEqual(my_default_moodverse["bad"], ["bad"]) - self.assertEqual(my_default_moodverse["awful"], ["awful"]) + self.assertEqual(["rad"], my_default_moodverse["rad"]) + self.assertEqual(["good"], my_default_moodverse["good"]) + self.assertEqual(["neutral"], my_default_moodverse["neutral"]) + self.assertEqual(["bad"], my_default_moodverse["bad"]) + self.assertEqual(["awful"], my_default_moodverse["awful"]) # this is just so I can test whether my __eq__ function overload correctly skips this self.assertNotEqual(my_default_moodverse["awful"], MoodGroup("awful")) # These comparisons should be falsy because the array has more moods than the default mood set initialised - self.assertNotEqual(my_default_moodverse["rad"], ["rad", "amazing"]) - self.assertNotEqual(my_default_moodverse["awful"], ["awful", "miserable"]) + self.assertNotEqual(["rad", "amazing"], my_default_moodverse["rad"]) + self.assertNotEqual(["awful", "miserable"], my_default_moodverse["awful"]) # This comparison should be falsy because it does not contain the default mood set initialised # └── known moods of 'neutral' group # └── neutral <-- from standard # And we're basically saying, "In neutral group there should only be a 'meh' mood" - self.assertNotEqual(my_default_moodverse["neutral"], ["meh"]) + self.assertNotEqual(["meh"], my_default_moodverse["neutral"]) def test_loading_valid_moods_into_moodverse(self): # These moods are self-sufficient, because even if standard mood set didn't exist, they satisfy all requirements @@ -56,11 +56,11 @@ def test_loading_valid_moods_into_moodverse(self): self.assertTrue(isinstance(my_moodverse["bad"], MoodGroup)) self.assertTrue(isinstance(my_moodverse["awful"], MoodGroup)) - self.assertEqual(my_moodverse["rad"], ["rad", "amazing"]) - self.assertEqual(my_moodverse["good"], ["good", "nice"]) - self.assertEqual(my_moodverse["neutral"], ["neutral", "ok", "fine"]) - self.assertEqual(my_moodverse["bad"], ["bad"]) - self.assertEqual(my_moodverse["awful"], ["awful", "miserable"]) + self.assertEqual(["rad", "amazing"], my_moodverse["rad"]) + self.assertEqual(["good", "nice"], my_moodverse["good"]) + self.assertEqual(["neutral", "ok", "fine"], my_moodverse["neutral"]) + self.assertEqual(["bad"], my_moodverse["bad"]) + self.assertEqual(["awful", "miserable"], my_moodverse["awful"]) def test_loading_semi_valid_moods_into_moodverse(self): # This mood set isn't self-sufficient, but still valid, because it has all the required "groups". @@ -105,18 +105,18 @@ def test_loading_semi_valid_moods_into_moodverse(self): self.assertTrue(isinstance(my_moodverse["awful"], MoodGroup)) # responses should be identical to the ones in previous test, because standard mood filled the blanks - self.assertEqual(my_moodverse["rad"], ["rad", "amazing"]) - self.assertEqual(my_moodverse["good"], ["good", "nice"]) - self.assertEqual(my_moodverse["neutral"], ["neutral", "ok", "fine"]) - self.assertEqual(my_moodverse["bad"], ["bad"]) - self.assertEqual(my_moodverse["awful"], ["awful", "miserable"]) + self.assertEqual(["rad", "amazing"], my_moodverse["rad"]) + self.assertEqual(["good", "nice"], my_moodverse["good"]) + self.assertEqual(["neutral", "ok", "fine"], my_moodverse["neutral"]) + self.assertEqual(["bad"], my_moodverse["bad"]) + self.assertEqual(["awful", "miserable"], my_moodverse["awful"]) # let's shuffle the order of values around to check if both lists are still equal - self.assertEqual(my_moodverse["rad"], ["amazing", "rad"]) - self.assertEqual(my_moodverse["good"], ["nice", "good"]) - self.assertEqual(my_moodverse["neutral"], ["ok", "neutral", "fine"]) - self.assertEqual(my_moodverse["bad"], ["bad"]) - self.assertEqual(my_moodverse["awful"], ["miserable", "awful"]) + self.assertEqual(["amazing", "rad"], my_moodverse["rad"]) + self.assertEqual(["nice", "good"], my_moodverse["good"]) + self.assertEqual(["ok", "neutral", "fine"], my_moodverse["neutral"]) + self.assertEqual(["bad"], my_moodverse["bad"]) + self.assertEqual(["miserable", "awful"], my_moodverse["awful"]) def test_get_mood(self): # These moods are self-sufficient, because even if standard mood set didn't exist, they satisfy all requirements @@ -210,8 +210,8 @@ def test_create_mood_in_this_group(self): # Check if they exist # checks __eq__ overload - obj(group_name) == str(group_name) - self.assertEqual(my_fancy_group["fancy"], "fancy") - self.assertEqual(my_fancy_group["out of this world"], "out of this world") + self.assertEqual("fancy", my_fancy_group["fancy"]) + self.assertEqual("out of this world", my_fancy_group["out of this world"]) # also checks __getitem__ - obj(group_name)[group_name]: List[mood: str] - self.assertSetEqual(set(my_fancy_group.known_moods), {"out of this world", "fancy"}) + self.assertSetEqual({"out of this world", "fancy"}, set(my_fancy_group.known_moods)) diff --git a/_tests/test_output.py b/_tests/test_output.py index 0642c0b..312b735 100644 --- a/_tests/test_output.py +++ b/_tests/test_output.py @@ -44,7 +44,7 @@ def test_bare_minimum_entry_content(self): # --- # getvalue() returns the entire stream content regardless of current stream position, read() does not. # https://stackoverflow.com/a/53485819 - self.assertEqual(my_fake_file_stream.getvalue(), compare_stream.getvalue()) + self.assertEqual(compare_stream.getvalue(), my_fake_file_stream.getvalue()) def test_entry_with_title_no_note(self): """ @@ -71,7 +71,7 @@ def test_entry_with_title_no_note(self): # THEN # --- - self.assertEqual(my_fake_file_stream.getvalue(), compare_stream.getvalue()) + self.assertEqual(compare_stream.getvalue(), my_fake_file_stream.getvalue()) def test_entry_with_title_and_note(self): """ @@ -102,7 +102,7 @@ def test_entry_with_title_and_note(self): # THEN # --- - self.assertEqual(my_fake_file_stream.getvalue(), compare_stream.getvalue()) + self.assertEqual(compare_stream.getvalue(), my_fake_file_stream.getvalue()) def test_entry_with_hashtagged_activities(self): """ @@ -129,7 +129,7 @@ def test_entry_with_hashtagged_activities(self): # THEN # --- - self.assertEqual(my_fake_file_stream.getvalue(), compare_stream.getvalue()) + self.assertEqual(compare_stream.getvalue(), my_fake_file_stream.getvalue()) # WHEN # --- @@ -148,7 +148,7 @@ def test_entry_with_hashtagged_activities(self): # THEN # --- - self.assertEqual(my_fake_file_stream.getvalue(), compare_stream.getvalue()) + self.assertEqual(compare_stream.getvalue(), my_fake_file_stream.getvalue()) class TestDatedEntriesGroup(TestCase): @@ -179,7 +179,7 @@ def test_outputting_day_with_one_entry(self): # THEN # --- - self.assertEqual(my_fake_file_stream.getvalue(), compare_stream.getvalue()) + self.assertEqual(compare_stream.getvalue(), my_fake_file_stream.getvalue()) def test_outputting_day_with_two_entries(self): """ @@ -219,7 +219,7 @@ def test_outputting_day_with_two_entries(self): # THEN # --- - self.assertEqual(my_fake_file_stream.getvalue(), compare_stream.getvalue()) + self.assertEqual(compare_stream.getvalue(), my_fake_file_stream.getvalue()) def test_outputting_day_with_two_entries_and_invalid_filetags(self): """ @@ -259,7 +259,7 @@ def test_outputting_day_with_two_entries_and_invalid_filetags(self): # THEN # --- - self.assertEqual(my_fake_file_stream.getvalue(), compare_stream.getvalue()) + self.assertEqual(compare_stream.getvalue(), my_fake_file_stream.getvalue()) def test_outputting_day_with_two_entries_and_partially_valid_filetags(self): """ @@ -303,7 +303,7 @@ def test_outputting_day_with_two_entries_and_partially_valid_filetags(self): # THEN # --- - self.assertEqual(my_fake_file_stream.getvalue(), compare_stream.getvalue()) + self.assertEqual(compare_stream.getvalue(), my_fake_file_stream.getvalue()) class TestOutputFileStructure(TestCase): @@ -323,19 +323,19 @@ def test_directory_loop(self): with open("_tests/output-results/2022/10/2022-10-25.md", encoding="UTF-8") as parsed_result: with open("_tests/expected_results/2022-10-25.md", encoding="UTF-8") as expected_result: - self.assertListEqual(parsed_result.readlines(), expected_result.readlines()) + self.assertListEqual(expected_result.readlines(), parsed_result.readlines()) with open("_tests/output-results/2022/10/2022-10-26.md", encoding="UTF-8") as parsed_result: with open("_tests/expected_results/2022-10-26.md", encoding="UTF-8") as expected_result: - self.assertListEqual(parsed_result.readlines(), expected_result.readlines()) + self.assertListEqual(expected_result.readlines(), parsed_result.readlines()) with open("_tests/output-results/2022/10/2022-10-27.md", encoding="UTF-8") as parsed_result: with open("_tests/expected_results/2022-10-27.md", encoding="UTF-8") as expected_result: - self.assertListEqual(parsed_result.readlines(), expected_result.readlines()) + self.assertListEqual(expected_result.readlines(), parsed_result.readlines()) with open("_tests/output-results/2022/10/2022-10-30.md", encoding="UTF-8") as parsed_result: with open("_tests/expected_results/2022-10-30.md", encoding="UTF-8") as expected_result: - self.assertListEqual(parsed_result.readlines(), expected_result.readlines()) + self.assertListEqual(expected_result.readlines(), parsed_result.readlines()) def tearDown(self) -> None: folder = '_tests/output-results' diff --git a/_tests/test_utils.py b/_tests/test_utils.py index 686c70a..472e813 100644 --- a/_tests/test_utils.py +++ b/_tests/test_utils.py @@ -8,12 +8,12 @@ class TestUtils(TestCase): def test_slugify(self): # no need to check if slug is a valid tag # noinspection SpellCheckingInspection - self.assertEqual(utils.slugify("ConvertThis to-------a SLUG", False), "convertthis-to-a-slug") + self.assertEqual("convertthis-to-a-slug", utils.slugify("ConvertThis to-------a SLUG", False)) # noinspection SpellCheckingInspection - self.assertEqual(utils.slugify("Zażółć gęślą jaźń ", False), "zażółć-gęślą-jaźń") - self.assertEqual(utils.slugify(" Multiple spaces between words", False), "multiple-spaces-between-words") + self.assertEqual("zażółć-gęślą-jaźń", utils.slugify("Zażółć gęślą jaźń ", False)) + self.assertEqual("multiple-spaces-between-words", utils.slugify(" Multiple spaces between words", False)) # noinspection SpellCheckingInspection - self.assertEqual(utils.slugify("Хлеба нашего повшеднего", False), "хлеба-нашего-повшеднего") + self.assertEqual("хлеба-нашего-повшеднего", utils.slugify("Хлеба нашего повшеднего", False)) # check if the slug is a valid tag with self.assertLogs(logging.getLogger("src.utils"), logging.WARNING): From d75d319d76c4956267061967583f3113643bfced Mon Sep 17 00:00:00 2001 From: DeutscheGabanna <thetobruk@duck.com> Date: Wed, 24 Jan 2024 18:21:17 +0100 Subject: [PATCH 32/40] fail pylint only with score under 9 Signed-off-by: DeutscheGabanna <thetobruk@duck.com> --- .github/workflows/pylint.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/pylint.yaml b/.github/workflows/pylint.yaml index dcf818b..1f28d31 100755 --- a/.github/workflows/pylint.yaml +++ b/.github/workflows/pylint.yaml @@ -12,4 +12,4 @@ jobs: pip install pylint - uses: pr-annotators/pylint-pr-annotator@main - name: Analysing the code with pylint - run: pylint $(git ls-files '*.py') \ No newline at end of file + run: pylint $(git ls-files '*.py') --fail-under 9 \ No newline at end of file From 93a27fda07a9b4c51e6c51c07a9b7aef92984352 Mon Sep 17 00:00:00 2001 From: DeutscheGabanna <thetobruk@duck.com> Date: Wed, 24 Jan 2024 18:35:17 +0100 Subject: [PATCH 33/40] reorganised test files and folders Signed-off-by: DeutscheGabanna <thetobruk@duck.com> --- _tests/dev-null | 0 .../expected_results/2022-10-25.md | 0 .../expected_results/2022-10-26.md | 0 .../expected_results/2022-10-27.md | 0 .../expected_results/2022-10-30.md | 0 .../{ => files/journal_CSVs}/empty_sheet.csv | 0 .../journal_CSVs}/sheet-1-valid-data.csv | 0 .../journal_CSVs}/sheet-2-corrupted-bytes.csv | Bin .../journal_CSVs}/sheet-3-wrong-format.txt | 0 .../journal_CSVs}/sheet-4-no-extension | Bin .../journal_CSVs}/sheet-6-empty-file.csv | 0 _tests/{ => files}/locked-dir/locked_file.csv | 0 .../mood_JSONs}/incomplete-moods.json | 0 _tests/test_dated_entry.py | 4 +- _tests/test_librarian.py | 47 ++++++++++-------- _tests/test_output.py | 20 ++++---- 16 files changed, 37 insertions(+), 34 deletions(-) delete mode 100755 _tests/dev-null rename _tests/{ => files}/expected_results/2022-10-25.md (100%) rename _tests/{ => files}/expected_results/2022-10-26.md (100%) rename _tests/{ => files}/expected_results/2022-10-27.md (100%) rename _tests/{ => files}/expected_results/2022-10-30.md (100%) rename _tests/{ => files/journal_CSVs}/empty_sheet.csv (100%) rename _tests/{ => files/journal_CSVs}/sheet-1-valid-data.csv (100%) rename _tests/{ => files/journal_CSVs}/sheet-2-corrupted-bytes.csv (100%) rename _tests/{ => files/journal_CSVs}/sheet-3-wrong-format.txt (100%) rename _tests/{ => files/journal_CSVs}/sheet-4-no-extension (100%) rename _tests/{ => files/journal_CSVs}/sheet-6-empty-file.csv (100%) rename _tests/{ => files}/locked-dir/locked_file.csv (100%) rename _tests/{ => files/mood_JSONs}/incomplete-moods.json (100%) diff --git a/_tests/dev-null b/_tests/dev-null deleted file mode 100755 index e69de29..0000000 diff --git a/_tests/expected_results/2022-10-25.md b/_tests/files/expected_results/2022-10-25.md similarity index 100% rename from _tests/expected_results/2022-10-25.md rename to _tests/files/expected_results/2022-10-25.md diff --git a/_tests/expected_results/2022-10-26.md b/_tests/files/expected_results/2022-10-26.md similarity index 100% rename from _tests/expected_results/2022-10-26.md rename to _tests/files/expected_results/2022-10-26.md diff --git a/_tests/expected_results/2022-10-27.md b/_tests/files/expected_results/2022-10-27.md similarity index 100% rename from _tests/expected_results/2022-10-27.md rename to _tests/files/expected_results/2022-10-27.md diff --git a/_tests/expected_results/2022-10-30.md b/_tests/files/expected_results/2022-10-30.md similarity index 100% rename from _tests/expected_results/2022-10-30.md rename to _tests/files/expected_results/2022-10-30.md diff --git a/_tests/empty_sheet.csv b/_tests/files/journal_CSVs/empty_sheet.csv similarity index 100% rename from _tests/empty_sheet.csv rename to _tests/files/journal_CSVs/empty_sheet.csv diff --git a/_tests/sheet-1-valid-data.csv b/_tests/files/journal_CSVs/sheet-1-valid-data.csv similarity index 100% rename from _tests/sheet-1-valid-data.csv rename to _tests/files/journal_CSVs/sheet-1-valid-data.csv diff --git a/_tests/sheet-2-corrupted-bytes.csv b/_tests/files/journal_CSVs/sheet-2-corrupted-bytes.csv similarity index 100% rename from _tests/sheet-2-corrupted-bytes.csv rename to _tests/files/journal_CSVs/sheet-2-corrupted-bytes.csv diff --git a/_tests/sheet-3-wrong-format.txt b/_tests/files/journal_CSVs/sheet-3-wrong-format.txt similarity index 100% rename from _tests/sheet-3-wrong-format.txt rename to _tests/files/journal_CSVs/sheet-3-wrong-format.txt diff --git a/_tests/sheet-4-no-extension b/_tests/files/journal_CSVs/sheet-4-no-extension similarity index 100% rename from _tests/sheet-4-no-extension rename to _tests/files/journal_CSVs/sheet-4-no-extension diff --git a/_tests/sheet-6-empty-file.csv b/_tests/files/journal_CSVs/sheet-6-empty-file.csv similarity index 100% rename from _tests/sheet-6-empty-file.csv rename to _tests/files/journal_CSVs/sheet-6-empty-file.csv diff --git a/_tests/locked-dir/locked_file.csv b/_tests/files/locked-dir/locked_file.csv similarity index 100% rename from _tests/locked-dir/locked_file.csv rename to _tests/files/locked-dir/locked_file.csv diff --git a/_tests/incomplete-moods.json b/_tests/files/mood_JSONs/incomplete-moods.json similarity index 100% rename from _tests/incomplete-moods.json rename to _tests/files/mood_JSONs/incomplete-moods.json diff --git a/_tests/test_dated_entry.py b/_tests/test_dated_entry.py index 3dbe82e..2b05b4e 100644 --- a/_tests/test_dated_entry.py +++ b/_tests/test_dated_entry.py @@ -53,8 +53,8 @@ def test_bare_minimum_dated_entries(self): ) # Then - self.assertTrue("vaguely ok", bare_minimum_dated_entry.mood) - self.assertTrue("1:49 AM", bare_minimum_dated_entry.uid) + self.assertEqual("vaguely ok", bare_minimum_dated_entry.mood) + self.assertEqual("1:49 AM", bare_minimum_dated_entry.uid) self.assertIsNone(bare_minimum_dated_entry.title) self.assertIsNone(bare_minimum_dated_entry.note) self.assertListEqual([], bare_minimum_dated_entry.activities) diff --git a/_tests/test_librarian.py b/_tests/test_librarian.py index 04e7818..0a0c4ea 100644 --- a/_tests/test_librarian.py +++ b/_tests/test_librarian.py @@ -11,19 +11,19 @@ class TestLibrarian(TestCase): We use internal class methods to check proper handling of data throughout the process. """ def test_init_valid_csv(self): - self.assertTrue(Librarian("_tests/sheet-1-valid-data.csv")) + self.assertTrue(Librarian("_tests/files/journal_CSVs/sheet-1-valid-data.csv")) def test_init_invalid_csv(self): """ Pass faulty files and see if it fails as expected. """ - self.assertRaises(librarian.CannotAccessFileError, Librarian, "_tests/sheet-2-corrupted-bytes.csv") - self.assertRaises(librarian.CannotAccessFileError, Librarian, "_tests/sheet-3-wrong-format.txt") - self.assertRaises(librarian.CannotAccessFileError, Librarian, "_tests/sheet-4-no-extension") - self.assertRaises(librarian.CannotAccessFileError, Librarian, "_tests/sheet-5-missing-file.csv") + self.assertRaises(librarian.CannotAccessFileError, Librarian, "_tests/files/journal_CSVs/sheet-2-corrupted-bytes.csv") + self.assertRaises(librarian.CannotAccessFileError, Librarian, "_tests/files/journal_CSVs/sheet-3-wrong-format.txt") + self.assertRaises(librarian.CannotAccessFileError, Librarian, "_tests/files/journal_CSVs/sheet-4-no-extension") + self.assertRaises(librarian.CannotAccessFileError, Librarian, "_tests/files/journal_CSVs/sheet-5-missing-file.csv") # TODO: handle this case in Librarian - # self.assertRaises(librarian.CannotAccessFileError, Librarian, "_tests/sheet-6-empty-file.csv") + # self.assertRaises(librarian.CannotAccessFileError, Librarian, "_tests/files/journal_CSVs/sheet-6-empty-file.csv") # TODO: maybe generate corrupted_sheet and wrong_format during runner setup in workflow mode? # dd if=/dev/urandom of="$corrupted_file" bs=1024 count=10 @@ -34,11 +34,11 @@ def test_init_invalid_csv(self): def test_valid_access_dates(self): """ - All the following dates exist in the _tests/sheet-1-valid-data.csv and should be accessible by ``lib``. + All the following dates exist in the _tests/files/journal_CSVs/sheet-1-valid-data.csv and should be accessible by ``lib``. """ # When lib = Librarian( - path_to_file="_tests/sheet-1-valid-data.csv", + path_to_file="_tests/files/journal_CSVs/sheet-1-valid-data.csv", path_to_moods="moods.json" ) @@ -56,11 +56,11 @@ def test_valid_access_dates(self): def test_wrong_access_dates(self): """ - **None** of the following dates exist in the _tests/sheet-1-valid-data.csv and should **NOT** be accessible by ``lib``. + **None** of the following dates exist in the _tests/files/journal_CSVs/sheet-1-valid-data.csv and should **NOT** be accessible by ``lib``. """ # When lib = Librarian( - path_to_file="_tests/sheet-1-valid-data.csv", + path_to_file="_tests/files/journal_CSVs/sheet-1-valid-data.csv", path_to_moods="moods.json" ) @@ -90,38 +90,41 @@ def test_wrong_access_dates(self): def test_custom_moods_when_passed_correctly(self): """Pass a valid JSON file and see if it knows it has access to custom moods now.""" self.assertTrue(Librarian( - path_to_file="_tests/sheet-1-valid-data.csv", + path_to_file="_tests/files/journal_CSVs/sheet-1-valid-data.csv", path_to_moods="moods.json" ).current_mood_set.has_custom_moods) def test_custom_moods_when_not_passed(self): """Pass no moods and see if it know it only has standard moods available.""" self.assertFalse(Librarian( - path_to_file="_tests/sheet-1-valid-data.csv" + path_to_file="_tests/files/journal_CSVs/sheet-1-valid-data.csv" ).current_mood_set.has_custom_moods) def test_custom_moods_with_invalid_jsons(self): """Pass faulty moods and see if it fails as expected.""" self.assertRaises( librarian.CannotAccessCustomMoodsError, - Librarian, "_tests/sheet-1-valid-data.csv", "_tests/output-results/", "_tests/empty_sheet.csv" + Librarian, + "_tests/files/journal_CSVs/sheet-1-valid-data.csv", + "_tests/files/output-results/", + "_tests/files/empty_sheet.csv" ) def test_custom_moods_when_json_invalid(self): self.assertRaises(librarian.CannotAccessCustomMoodsError, Librarian, - "_tests/sheet-1-valid-data.csv", - "_tests/output-results/", + "_tests/files/journal_CSVs/sheet-1-valid-data.csv", + "_tests/files/output-results/", "_tests/empty_sheet.csv") self.assertRaises(librarian.CannotAccessCustomMoodsError, Librarian, - "_tests/sheet-1-valid-data.csv", - "_tests/output-results/", + "_tests/files/journal_CSVs/sheet-1-valid-data.csv", + "_tests/files/output-results/", "_tests/missing-file.json") self.assertRaises(librarian.CannotAccessCustomMoodsError, Librarian, - "_tests/sheet-1-valid-data.csv", - "_tests/output-results/", + "_tests/files/journal_CSVs/sheet-1-valid-data.csv", + "_tests/files/output-results/", "_tests/locked-dir/locked_file.csv") def test_custom_moods_that_are_incomplete(self): @@ -131,8 +134,8 @@ def test_custom_moods_that_are_incomplete(self): Therefore, since ``incomplete-moods`` lacks the ``good`` group, the assertion will evaluate to False. """ lib_to_test = Librarian( - "_tests/sheet-1-valid-data.csv", - "_tests/output-results/", - "_tests/incomplete-moods.json" + "_tests/files/journal_CSVs/sheet-1-valid-data.csv", + "_tests/files/output-results/", + "_tests/mood_JSONs/incomplete-moods.json" ) self.assertFalse(lib_to_test.current_mood_set.has_custom_moods) diff --git a/_tests/test_output.py b/_tests/test_output.py index 312b735..2dc9af6 100644 --- a/_tests/test_output.py +++ b/_tests/test_output.py @@ -318,27 +318,27 @@ def test_directory_loop(self): """ options.tags = ["daily"] - lib = Librarian("_tests/sheet-1-valid-data.csv", path_to_output="_tests/output-results") + lib = Librarian("_tests/files/journal_CSVs/sheet-1-valid-data.csv", path_to_output="_tests/files/output-results") lib.output_all() - with open("_tests/output-results/2022/10/2022-10-25.md", encoding="UTF-8") as parsed_result: - with open("_tests/expected_results/2022-10-25.md", encoding="UTF-8") as expected_result: + with open("_tests/files/output-results/2022/10/2022-10-25.md", encoding="UTF-8") as parsed_result: + with open("_tests/files/expected_results/2022-10-25.md", encoding="UTF-8") as expected_result: self.assertListEqual(expected_result.readlines(), parsed_result.readlines()) - with open("_tests/output-results/2022/10/2022-10-26.md", encoding="UTF-8") as parsed_result: - with open("_tests/expected_results/2022-10-26.md", encoding="UTF-8") as expected_result: + with open("_tests/files/output-results/2022/10/2022-10-26.md", encoding="UTF-8") as parsed_result: + with open("_tests/files/expected_results/2022-10-26.md", encoding="UTF-8") as expected_result: self.assertListEqual(expected_result.readlines(), parsed_result.readlines()) - with open("_tests/output-results/2022/10/2022-10-27.md", encoding="UTF-8") as parsed_result: - with open("_tests/expected_results/2022-10-27.md", encoding="UTF-8") as expected_result: + with open("_tests/files/output-results/2022/10/2022-10-27.md", encoding="UTF-8") as parsed_result: + with open("_tests/files/expected_results/2022-10-27.md", encoding="UTF-8") as expected_result: self.assertListEqual(expected_result.readlines(), parsed_result.readlines()) - with open("_tests/output-results/2022/10/2022-10-30.md", encoding="UTF-8") as parsed_result: - with open("_tests/expected_results/2022-10-30.md", encoding="UTF-8") as expected_result: + with open("_tests/files/output-results/2022/10/2022-10-30.md", encoding="UTF-8") as parsed_result: + with open("_tests/files/expected_results/2022-10-30.md", encoding="UTF-8") as expected_result: self.assertListEqual(expected_result.readlines(), parsed_result.readlines()) def tearDown(self) -> None: - folder = '_tests/output-results' + folder = '_tests/files/output-results' for filename in os.listdir(folder): file_path = os.path.join(folder, filename) try: From 4cbc16be88d0a0a232482c395dd5400af179d8e6 Mon Sep 17 00:00:00 2001 From: DeutscheGabanna <thetobruk@duck.com> Date: Wed, 24 Jan 2024 18:41:55 +0100 Subject: [PATCH 34/40] deletes os.linesep imports Signed-off-by: DeutscheGabanna <thetobruk@duck.com> --- src/dated_entry.py | 1 - 1 file changed, 1 deletion(-) diff --git a/src/dated_entry.py b/src/dated_entry.py index 4bd20e6..5732c1d 100644 --- a/src/dated_entry.py +++ b/src/dated_entry.py @@ -8,7 +8,6 @@ from __future__ import annotations import logging -import os # used only for linesep import re import io from typing import Match From dc99bb2e4a1e982a085683c0571fd12c1b3ce05b Mon Sep 17 00:00:00 2001 From: DeutscheGabanna <thetobruk@duck.com> Date: Wed, 24 Jan 2024 18:55:31 +0100 Subject: [PATCH 35/40] better exceptions and tests in dated_entry.py Signed-off-by: DeutscheGabanna <thetobruk@duck.com> --- _tests/test_dated_entry.py | 3 +++ src/dated_entry.py | 10 ++++++---- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/_tests/test_dated_entry.py b/_tests/test_dated_entry.py index 2b05b4e..aac5204 100644 --- a/_tests/test_dated_entry.py +++ b/_tests/test_dated_entry.py @@ -16,6 +16,9 @@ def test_slice_quotes(self): self.assertEqual("", slice_quotes("\"\"")) self.assertEqual("bicycle", slice_quotes("\" bicycle \"")) + def test_is_time_format_valid(self): + + class TestTime(TestCase): def test_try_creating_valid_times(self): diff --git a/src/dated_entry.py b/src/dated_entry.py index 5732c1d..6f2c91c 100644 --- a/src/dated_entry.py +++ b/src/dated_entry.py @@ -68,7 +68,11 @@ class IsNotTimeError(utils.CustomException): - """Expected a string in a valid time format - HH:MM with optional AM/PM suffix.""" + def __init__(self, tried_time: str, logger: logging.Logger = None): + msg = f"Expected HH:MM (+ optionally AM/PM suffix) but got {tried_time} instead." + if logger is not None: + logger.warning(msg) + super().__init__(msg) def is_time_format_valid(string: str) -> Match[str] | None: @@ -144,9 +148,7 @@ def __init__(self, string: str): # NOT OK else: - msg_on_error = ErrorMsg.print(ErrorMsg.WRONG_VALUE, string, "HH:MM (AM/PM/)") - self.__logger.warning(msg_on_error) - raise IsNotTimeError(msg_on_error) + raise IsNotTimeError(string, self.__logger) def __str__(self) -> str: """ From 0cc7c8290bf663ed0ee7738a183d5410338fb288 Mon Sep 17 00:00:00 2001 From: DeutscheGabanna <thetobruk@duck.com> Date: Wed, 24 Jan 2024 19:02:34 +0100 Subject: [PATCH 36/40] switched expected and actual results in assertions & deleted TODO about it Signed-off-by: DeutscheGabanna <thetobruk@duck.com> --- _tests/test_dated_entry.py | 1 - 1 file changed, 1 deletion(-) diff --git a/_tests/test_dated_entry.py b/_tests/test_dated_entry.py index 3dbe82e..04c0a4c 100644 --- a/_tests/test_dated_entry.py +++ b/_tests/test_dated_entry.py @@ -11,7 +11,6 @@ class TestDatedEntryUtils(TestCase): def test_slice_quotes(self): - # TODO: Flip values in assertions, because unittest's 'first' param is expected, 'second' is actual. self.assertEqual("test", slice_quotes("\"test\"")) self.assertEqual("", slice_quotes("\"\"")) self.assertEqual("bicycle", slice_quotes("\" bicycle \"")) From 45d9942cc54478b4d1048065f40126a245303fea Mon Sep 17 00:00:00 2001 From: DeutscheGabanna <thetobruk@duck.com> Date: Wed, 24 Jan 2024 21:56:39 +0100 Subject: [PATCH 37/40] better exceptions and tests in dated_entry.py Signed-off-by: DeutscheGabanna <thetobruk@duck.com> --- _tests/test_dated_entry.py | 65 +++++++++++++++++++++++++++++++++++--- src/dated_entry.py | 12 +++---- 2 files changed, 67 insertions(+), 10 deletions(-) diff --git a/_tests/test_dated_entry.py b/_tests/test_dated_entry.py index cdd5647..56a12dd 100644 --- a/_tests/test_dated_entry.py +++ b/_tests/test_dated_entry.py @@ -4,20 +4,66 @@ Time, \ slice_quotes, \ DatedEntry, \ - IsNotTimeError + IsNotTimeError, \ + is_time_format_valid, \ + is_time_range_valid # TODO: more test coverage needed class TestDatedEntryUtils(TestCase): + def test_is_time_format_valid(self): + self.assertTrue(is_time_format_valid("1:49 AM")) + self.assertTrue(is_time_format_valid("02:15")) + self.assertTrue(is_time_format_valid("12:00")) + self.assertTrue(is_time_format_valid("1:49 PM")) + self.assertFalse(is_time_format_valid("1::49")) + self.assertFalse(is_time_format_valid("12:60 AM")) + # noinspection SpellCheckingInspection + self.assertFalse(is_time_format_valid("okk:oksdf s")) + self.assertFalse(is_time_format_valid("25:00 AM")) + self.assertFalse(is_time_format_valid("26:10")) + self.assertFalse(is_time_format_valid("12:60 PM")) + self.assertFalse(is_time_format_valid("12:00 XX")) + self.assertFalse(is_time_format_valid("abc:def AM")) + self.assertFalse(is_time_format_valid("abc:def XM")) + self.assertFalse(is_time_format_valid("24:00 PM")) + self.assertFalse(is_time_format_valid("00:61 AM")) + self.assertFalse(is_time_format_valid("---")) + self.assertFalse(is_time_format_valid("23y7vg")) + self.assertFalse(is_time_format_valid("::::")) + self.assertFalse(is_time_format_valid("????")) + self.assertFalse(is_time_format_valid("00000:000000000000")) + self.assertFalse(is_time_format_valid("99:12")) + self.assertFalse(is_time_format_valid("11:12 UU")) + self.assertFalse(is_time_format_valid("9::12")) + + # as expected, this will return True, because we're not checking ranges yet + self.assertTrue(is_time_format_valid("14:59 AM")) + + def test_is_time_range_valid(self): + self.assertTrue(is_time_range_valid("11:00 AM")) + self.assertTrue(is_time_range_valid("3:00 AM")) + self.assertTrue(is_time_range_valid("7:59 AM")) + self.assertTrue(is_time_range_valid("17:50")) + self.assertTrue(is_time_range_valid("21:37")) + self.assertTrue(is_time_range_valid("00:00")) + self.assertTrue(is_time_range_valid("14:25")) + + self.assertFalse(is_time_range_valid("31:00")) + self.assertFalse(is_time_range_valid("11:79")) + self.assertFalse(is_time_range_valid("20:99 PM")) + self.assertFalse(is_time_range_valid("-5:12")) + self.assertFalse(is_time_range_valid("-5:-12")) + self.assertFalse(is_time_range_valid("-5:-12")) + self.assertFalse(is_time_range_valid("13:00 AM")) + self.assertFalse(is_time_range_valid("15:00 PM")) + def test_slice_quotes(self): self.assertEqual("test", slice_quotes("\"test\"")) self.assertEqual("", slice_quotes("\"\"")) self.assertEqual("bicycle", slice_quotes("\" bicycle \"")) - def test_is_time_format_valid(self): - - class TestTime(TestCase): def test_try_creating_valid_times(self): @@ -45,6 +91,17 @@ def test_try_creating_invalid_times(self): self.assertRaises(IsNotTimeError, Time, "24:00 PM") self.assertRaises(IsNotTimeError, Time, "00:61 AM") + def test_str(self): + self.assertEqual("1:49 AM", str(Time("1:49 AM"))) + self.assertEqual("02:15 AM", str(Time("02:15 AM"))) + self.assertEqual("12:00 PM", str(Time("12:00 PM"))) + self.assertEqual("6:30 PM", str(Time("6:30 PM"))) + self.assertEqual("9:45 PM", str(Time("9:45 PM"))) + self.assertEqual("00:00 AM", str(Time("00:00 AM"))) + self.assertEqual("12:00 AM", str(Time("12:00 AM"))) + self.assertEqual("13:30", str(Time("13:30"))) + self.assertEqual("9:45", str(Time("9:45"))) + class TestDatedEntry(TestCase): def test_bare_minimum_dated_entries(self): diff --git a/src/dated_entry.py b/src/dated_entry.py index 6f2c91c..4689205 100644 --- a/src/dated_entry.py +++ b/src/dated_entry.py @@ -68,11 +68,10 @@ class IsNotTimeError(utils.CustomException): - def __init__(self, tried_time: str, logger: logging.Logger = None): - msg = f"Expected HH:MM (+ optionally AM/PM suffix) but got {tried_time} instead." - if logger is not None: - logger.warning(msg) - super().__init__(msg) + msg = "Expected HH:MM (+ optionally AM/PM suffix) but got {} instead." + + def __init__(self, tried_time: str): + super().__init__(type(self).msg.format(tried_time)) def is_time_format_valid(string: str) -> Match[str] | None: @@ -148,7 +147,8 @@ def __init__(self, string: str): # NOT OK else: - raise IsNotTimeError(string, self.__logger) + self.__logger.warning(IsNotTimeError.msg.format(string)) + raise IsNotTimeError(string) def __str__(self) -> str: """ From 09a5c7b6bc98abad0e1efa98a128d983a0ec7297 Mon Sep 17 00:00:00 2001 From: DeutscheGabanna <thetobruk@duck.com> Date: Wed, 24 Jan 2024 22:17:48 +0100 Subject: [PATCH 38/40] generalised into a function process of extracing only truthy values out of a delimited string Signed-off-by: DeutscheGabanna <thetobruk@duck.com> --- _tests/test_dated_entry.py | 10 +++++++++- _tests/test_utils.py | 4 ++++ src/dated_entry.py | 21 +++------------------ src/utils.py | 23 +++++++++++++++++++++++ 4 files changed, 39 insertions(+), 19 deletions(-) diff --git a/_tests/test_dated_entry.py b/_tests/test_dated_entry.py index 56a12dd..601c1b4 100644 --- a/_tests/test_dated_entry.py +++ b/_tests/test_dated_entry.py @@ -78,6 +78,14 @@ def test_try_creating_valid_times(self): self.assertTrue(Time("13:30")) self.assertTrue(Time("9:45")) + def test_try_whitespaces(self): + self.assertTrue(Time(" 1:49 AM ")) + self.assertTrue(Time("02:15 AM ")) + self.assertTrue(Time(" 12:00 PM")) + self.assertEqual("01:49 AM", Time(" 1:49 AM ")) + self.assertEqual("02:15 AM", Time("02:15 AM ")) + self.assertEqual("12:00 PM", Time(" 12:00 PM")) + def test_try_creating_invalid_times(self): # Invalid time formats # noinspection SpellCheckingInspection @@ -113,7 +121,7 @@ def test_bare_minimum_dated_entries(self): # Then self.assertEqual("vaguely ok", bare_minimum_dated_entry.mood) - self.assertEqual("1:49 AM", bare_minimum_dated_entry.uid) + self.assertEqual("1:49 AM", str(bare_minimum_dated_entry.uid)) self.assertIsNone(bare_minimum_dated_entry.title) self.assertIsNone(bare_minimum_dated_entry.note) self.assertListEqual([], bare_minimum_dated_entry.activities) diff --git a/_tests/test_utils.py b/_tests/test_utils.py index 472e813..9bae4da 100644 --- a/_tests/test_utils.py +++ b/_tests/test_utils.py @@ -30,3 +30,7 @@ def test_expand_path(self): self.assertFalse(utils.expand_path("$HOME/whatever").startswith("$HOME")) # noinspection SpellCheckingInspection self.assertFalse(utils.expand_path('~/yes').startswith('~')) + + def test_strip_and_get_truthy(self): + self.assertListEqual(["one", "two"], utils.strip_and_get_truthy("\"one||two|||||\"", "|")) + self.assertListEqual([], utils.strip_and_get_truthy("\"\"", "|")) diff --git a/src/dated_entry.py b/src/dated_entry.py index 4689205..e3f7b39 100644 --- a/src/dated_entry.py +++ b/src/dated_entry.py @@ -103,18 +103,6 @@ def is_time_range_valid(string: str) -> bool: return all((is_hour_ok, is_minutes_ok)) -def slice_quotes(string: str) -> str: - """ - Gets rid of initial and terminating quotation marks inserted by Daylio - :param string: string to be sliced - :returns: string without quotation marks in the beginning and end of the initial string, even if it means empty str. - """ - if string is not None and len(string) > 2: - return string.strip("\"").strip() - # only 2 characters? Then it is an empty cell. - return "" - - class ErrorMsg(errors.ErrorMsgBase): INVALID_MOOD = "Mood {} is missing from a list of known moods. Not critical, but colouring won't work on the entry." WRONG_TIME = "Received {}, expected valid time. Cannot create this entry without a valid time." @@ -217,11 +205,8 @@ def __init__(self, # --- # Process activities # --- - # TODO: I could make a decouple_and_sanitise() func to strip() and slice the string into a valid array self.__activities = [] - # empty string "" is unfortunately still a valid element of array which makes it truthy - # I use list comprehension to discard such falsy values from the temporary array - array = [activity for activity in slice_quotes(activities).split(options.csv_delimiter) if activity] + array = utils.strip_and_get_truthy(activities, options.csv_delimiter) if len(array) > 0: for activity in array: self.__activities.append(utils.slugify( @@ -235,7 +220,7 @@ def __init__(self, # --- self.__title = None if title: - self.__title = slice_quotes(title) + self.__title = utils.slice_quotes(title) else: errors.ErrorMsgBase.print(ErrorMsg.WRONG_TITLE) # --- @@ -243,7 +228,7 @@ def __init__(self, # --- self.__note = None if note: - self.__note = slice_quotes(note) + self.__note = utils.slice_quotes(note) else: errors.ErrorMsgBase.print(ErrorMsg.WRONG_NOTE) diff --git a/src/utils.py b/src/utils.py index 1173715..36a887c 100755 --- a/src/utils.py +++ b/src/utils.py @@ -4,6 +4,7 @@ import logging import os import re +from typing import Any, List from src import errors @@ -71,3 +72,25 @@ def expand_path(path): os.path.expandvars(path) ) ) + + +def slice_quotes(string: str) -> str: + """ + Gets rid of initial and terminating quotation marks inserted by Daylio + :param string: string to be sliced + :returns: string without quotation marks in the beginning and end of the initial string, even if it means empty str. + """ + if string is not None and len(string) > 2: + return string.strip("\"").strip() + # only 2 characters? Then it is an empty cell. + return "" + + +def strip_and_get_truthy(delimited_string: str, delimiter: str) -> List[str]: + """ + Pipe delimited strings may result in arrays that contain zero-length strings. + While such strings in itself are falsy, any array that has them is automatically truthy, unfortunately. + Therefore, I use list comprehension to discard such falsy values from an array and return the sanitised array. + :returns: array without falsy values, even if it results in empty (falsy) array + """ + return [el for el in slice_quotes(delimited_string).split(delimiter) if el] From c1656425393a9f364f557812470bb012b9293156 Mon Sep 17 00:00:00 2001 From: DeutscheGabanna <thetobruk@duck.com> Date: Wed, 24 Jan 2024 23:06:40 +0100 Subject: [PATCH 39/40] null values shenenigans and better dated_entry.py tests Signed-off-by: DeutscheGabanna <thetobruk@duck.com> --- _tests/test_dated_entry.py | 81 ++++++++++++++++++++++++++++++++------ _tests/test_librarian.py | 4 +- _tests/test_utils.py | 5 +++ src/dated_entry.py | 35 ++++++++-------- src/utils.py | 22 ++++++----- 5 files changed, 107 insertions(+), 40 deletions(-) diff --git a/_tests/test_dated_entry.py b/_tests/test_dated_entry.py index 601c1b4..ca53d93 100644 --- a/_tests/test_dated_entry.py +++ b/_tests/test_dated_entry.py @@ -1,16 +1,14 @@ from unittest import TestCase +from src.config import options from src.dated_entry import \ Time, \ - slice_quotes, \ DatedEntry, \ IsNotTimeError, \ is_time_format_valid, \ is_time_range_valid -# TODO: more test coverage needed - class TestDatedEntryUtils(TestCase): def test_is_time_format_valid(self): self.assertTrue(is_time_format_valid("1:49 AM")) @@ -59,11 +57,6 @@ def test_is_time_range_valid(self): self.assertFalse(is_time_range_valid("13:00 AM")) self.assertFalse(is_time_range_valid("15:00 PM")) - def test_slice_quotes(self): - self.assertEqual("test", slice_quotes("\"test\"")) - self.assertEqual("", slice_quotes("\"\"")) - self.assertEqual("bicycle", slice_quotes("\" bicycle \"")) - class TestTime(TestCase): def test_try_creating_valid_times(self): @@ -82,9 +75,10 @@ def test_try_whitespaces(self): self.assertTrue(Time(" 1:49 AM ")) self.assertTrue(Time("02:15 AM ")) self.assertTrue(Time(" 12:00 PM")) - self.assertEqual("01:49 AM", Time(" 1:49 AM ")) - self.assertEqual("02:15 AM", Time("02:15 AM ")) - self.assertEqual("12:00 PM", Time(" 12:00 PM")) + # Leading 0 or not is consistent which what was passed, not with what the function thinks is best + self.assertEqual("1:49 AM", str(Time(" 1:49 AM "))) + self.assertEqual("02:15 AM", str(Time("02:15 AM "))) + self.assertEqual("12:00 PM", str(Time(" 12:00 PM"))) def test_try_creating_invalid_times(self): # Invalid time formats @@ -126,5 +120,70 @@ def test_bare_minimum_dated_entries(self): self.assertIsNone(bare_minimum_dated_entry.note) self.assertListEqual([], bare_minimum_dated_entry.activities) + def test_other_variants_of_dated_entries(self): + # When + entry = DatedEntry( + time="1:49 AM", + mood="vaguely ok", + title="Normal situation" + ) + + # Then + self.assertEqual("vaguely ok", entry.mood) + self.assertEqual("1:49 AM", str(entry.uid)) + self.assertEqual("Normal situation", entry.title) + self.assertIsNone(entry.note) + self.assertListEqual([], entry.activities) + + # When + entry = DatedEntry( + time="1:49 AM", + mood="vaguely ok", + title="Normal situation", + note="A completely normal situation just occurred." + ) + + # Then + self.assertEqual("vaguely ok", entry.mood) + self.assertEqual("1:49 AM", str(entry.uid)) + self.assertEqual("Normal situation", entry.title) + self.assertEqual("A completely normal situation just occurred.", entry.note) + self.assertListEqual([], entry.activities) + + # When + options.tag_activities = True + entry = DatedEntry( + time="1:49 AM", + mood="vaguely ok", + title="Normal situation", + note="A completely normal situation just occurred.", + activities="bicycle|chess|gaming" + ) + + # Then + self.assertEqual("vaguely ok", entry.mood) + self.assertEqual("1:49 AM", str(entry.uid)) + self.assertEqual("Normal situation", entry.title) + self.assertEqual("A completely normal situation just occurred.", entry.note) + self.assertListEqual(["#bicycle", "#chess", "#gaming"], entry.activities) + + # When + options.tag_activities = False + entry = DatedEntry( + time="1:49 AM", + mood="vaguely ok", + title="Normal situation", + note="A completely normal situation just occurred.", + activities="bicycle|chess|gaming" + ) + + # Then + self.assertEqual("vaguely ok", entry.mood) + self.assertEqual("1:49 AM", str(entry.uid)) + self.assertEqual("Normal situation", entry.title) + self.assertEqual("A completely normal situation just occurred.", entry.note) + self.assertListEqual(["bicycle", "chess", "gaming"], entry.activities) + def test_insufficient_dated_entries(self): self.assertRaises(ValueError, DatedEntry, time="2:00", mood="") + self.assertRaises(ValueError, DatedEntry, time=":00", mood="vaguely ok") diff --git a/_tests/test_librarian.py b/_tests/test_librarian.py index 0a0c4ea..f974538 100644 --- a/_tests/test_librarian.py +++ b/_tests/test_librarian.py @@ -2,6 +2,7 @@ from src import librarian from src.librarian import Librarian +from src.config import options class TestLibrarian(TestCase): @@ -133,9 +134,10 @@ def test_custom_moods_that_are_incomplete(self): However, it can only expand it (and be truthy) if the dict with moods has all required groups. Therefore, since ``incomplete-moods`` lacks the ``good`` group, the assertion will evaluate to False. """ + options.tag_activities = True lib_to_test = Librarian( "_tests/files/journal_CSVs/sheet-1-valid-data.csv", "_tests/files/output-results/", - "_tests/mood_JSONs/incomplete-moods.json" + "_tests/files/mood_JSONs/incomplete-moods.json" ) self.assertFalse(lib_to_test.current_mood_set.has_custom_moods) diff --git a/_tests/test_utils.py b/_tests/test_utils.py index 9bae4da..c1811ba 100644 --- a/_tests/test_utils.py +++ b/_tests/test_utils.py @@ -34,3 +34,8 @@ def test_expand_path(self): def test_strip_and_get_truthy(self): self.assertListEqual(["one", "two"], utils.strip_and_get_truthy("\"one||two|||||\"", "|")) self.assertListEqual([], utils.strip_and_get_truthy("\"\"", "|")) + + def test_slice_quotes(self): + self.assertEqual("test", utils.slice_quotes("\"test\"")) + self.assertIsNone(utils.slice_quotes("\"\"")) + self.assertEqual("bicycle", utils.slice_quotes("\" bicycle \"")) diff --git a/src/dated_entry.py b/src/dated_entry.py index e3f7b39..c87a503 100644 --- a/src/dated_entry.py +++ b/src/dated_entry.py @@ -128,7 +128,7 @@ def __init__(self, string: str): self.__logger = logging.getLogger(self.__class__.__name__) # OK - if is_time_format_valid(string) and is_time_range_valid(string): + if is_time_format_valid(string.strip()) and is_time_range_valid(string.strip()): time_array = string.strip().split(':') self.__hour = time_array[0] self.__minutes = time_array[1] @@ -193,7 +193,7 @@ def __init__(self, # --- # MOOD # --- - if len(mood) == 0: + if not mood: raise ValueError # Check if the mood is valid - i.e. it does exist in the currently used Moodverse if not override_mood_set.get_mood(mood): @@ -206,30 +206,27 @@ def __init__(self, # Process activities # --- self.__activities = [] - array = utils.strip_and_get_truthy(activities, options.csv_delimiter) - if len(array) > 0: - for activity in array: - self.__activities.append(utils.slugify( - activity, - options.tag_activities - )) - else: - errors.ErrorMsgBase.print(ErrorMsg.WRONG_ACTIVITIES) + if activities: + working_array = utils.strip_and_get_truthy(activities, options.csv_delimiter) + if len(working_array) > 0: + for activity in working_array: + self.__activities.append(utils.slugify( + activity, + options.tag_activities + )) + else: + errors.ErrorMsgBase.print(ErrorMsg.WRONG_ACTIVITIES) # --- # Process title # --- - self.__title = None - if title: - self.__title = utils.slice_quotes(title) - else: + self.__title = utils.slice_quotes(title) if title else None + if not title: errors.ErrorMsgBase.print(ErrorMsg.WRONG_TITLE) # --- # Process note # --- - self.__note = None - if note: - self.__note = utils.slice_quotes(note) - else: + self.__note = utils.slice_quotes(note) if note else None + if not note: errors.ErrorMsgBase.print(ErrorMsg.WRONG_NOTE) def output(self, stream: io.IOBase | typing.IO) -> int: diff --git a/src/utils.py b/src/utils.py index 36a887c..ef4eeb7 100755 --- a/src/utils.py +++ b/src/utils.py @@ -41,7 +41,7 @@ class StreamError(CustomException): pass -def slugify(text: str, taggify: bool): +def slugify(text: str, taggify: bool) -> str: # noinspection SpellCheckingInspection """ Simple slugification function to transform text. Works on non-latin characters too. @@ -60,7 +60,7 @@ def slugify(text: str, taggify: bool): return '#' + text if taggify else text -def expand_path(path): +def expand_path(path: str) -> str: """ Expand all %variables%, ~/home-directories and relative parts in the path. Return the expanded path. It does not use os.path.abspath() because it treats current script directory as root. @@ -74,16 +74,14 @@ def expand_path(path): ) -def slice_quotes(string: str) -> str: +def slice_quotes(string: str) -> str | None: """ Gets rid of initial and terminating quotation marks inserted by Daylio :param string: string to be sliced - :returns: string without quotation marks in the beginning and end of the initial string, even if it means empty str. + :returns: string without quotation marks in the beginning and end of the initial string, or nothing if "" provided. """ - if string is not None and len(string) > 2: - return string.strip("\"").strip() - # only 2 characters? Then it is an empty cell. - return "" + # only 2 characters? Then it is an empty cell, because Daylio wraps its values inside "" like so: "","","",""... + return string.strip("\"").strip() if string and len(string) > 2 else None def strip_and_get_truthy(delimited_string: str, delimiter: str) -> List[str]: @@ -93,4 +91,10 @@ def strip_and_get_truthy(delimited_string: str, delimiter: str) -> List[str]: Therefore, I use list comprehension to discard such falsy values from an array and return the sanitised array. :returns: array without falsy values, even if it results in empty (falsy) array """ - return [el for el in slice_quotes(delimited_string).split(delimiter) if el] + # I need to separate returning into the guard statement and actual return because slice_quotes can produce null vals + if delimited_string is None: + return [] + + sliced_del_string = slice_quotes(delimited_string) + + return [el for el in sliced_del_string.split(delimiter) if el] if sliced_del_string else [] From 07693d570a8251edb4604d24758f515b9fd4d2a6 Mon Sep 17 00:00:00 2001 From: DeutscheGabanna <thetobruk@duck.com> Date: Wed, 28 Feb 2024 19:23:00 +0100 Subject: [PATCH 40/40] add publish workflow --- .github/workflows/publish.yaml | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) create mode 100644 .github/workflows/publish.yaml diff --git a/.github/workflows/publish.yaml b/.github/workflows/publish.yaml new file mode 100644 index 0000000..5df27bb --- /dev/null +++ b/.github/workflows/publish.yaml @@ -0,0 +1,30 @@ +name: Upload Obsidian Daylio Parser + +on: + release: + types: [published] + +permissions: + contents: read + +jobs: + deploy: + + runs-on: ubuntu-latest + + steps: + - name: Check-out + uses: actions/checkout@v4 + - name: Set up Python env + uses: actions/setup-python@v5 + with: + python-version: '3.12' + - name: Build + run: | + python -m pip install build + python -m build + - name: Publish this release + uses: pypa/gh-action-pypi-publish@release/v1 + with: + user: __token__ + password: ${{ secrets.PYPI_API_TOKEN }}