From 1aaa6ba3686a5a14f2957b6b8d02ffc0903f6832 Mon Sep 17 00:00:00 2001
From: Niki Roo
Date: Sun, 23 Sep 2018 06:04:04 +0200
Subject: [PATCH] Fix new tests and make TestLWN work

---
 changelog.md | 1 +
 ...es.jar => nikiroo-utils-4.4.3-sources.jar} | Bin 296606 -> 296617 bytes
 src/be/nikiroo/gofetch/test/TestBase.java | 19 +-
 src/be/nikiroo/gofetch/test/TestLWN.java | 29 +-
 test/expected/LWN/0000763252 | 1957 +++++++++++++++++
 test/expected/LWN/0000763252.header | 3 +
 test/expected/LWN/0000763252.header.html | 20 +
 test/expected/LWN/0000763252.html | 25 +
 test/expected/LWN/0000763603 | 13 +
 test/expected/LWN/0000763603.header | 19 +
 test/expected/LWN/0000763603.header.html | 20 +
 test/expected/LWN/0000763603.html | 25 +
 test/expected/LWN/0000763729 | 13 +
 test/expected/LWN/0000763729.header | 13 +
 test/expected/LWN/0000763729.header.html | 20 +
 test/expected/LWN/0000763729.html | 25 +
 test/expected/LWN/0000763789 | 13 +
 test/expected/LWN/0000763789.header | 3 +
 test/expected/LWN/0000763789.header.html | 20 +
 test/expected/LWN/0000763789.html | 25 +
 test/expected/LWN/0000763987 | 19 +
 test/expected/LWN/0000763987.header | 8 +
 test/expected/LWN/0000763987.header.html | 20 +
 test/expected/LWN/0000763987.html | 25 +
 test/expected/LWN/0000764001 | 13 +
 test/expected/LWN/0000764001.header | 12 +
 test/expected/LWN/0000764001.header.html | 20 +
 test/expected/LWN/0000764001.html | 25 +
 test/expected/LWN/0000764046 | 490 +++++
 test/expected/LWN/0000764046.header | 13 +
 test/expected/LWN/0000764046.header.html | 20 +
 test/expected/LWN/0000764046.html | 25 +
 test/expected/LWN/0000764048 | 13 +
 test/expected/LWN/0000764048.header | 13 +
 test/expected/LWN/0000764048.header.html | 20 +
 test/expected/LWN/0000764048.html | 25 +
 test/expected/LWN/0000764055 | 239 ++
 test/expected/LWN/0000764055.header | 10 +
 test/expected/LWN/0000764055.header.html | 20 +
 test/expected/LWN/0000764055.html | 25 +
 test/expected/LWN/0000764057 | 13 +
 test/expected/LWN/0000764057.header | 16 +
 test/expected/LWN/0000764057.header.html | 20 +
 test/expected/LWN/0000764057.html | 25 +
 test/expected/LWN/0000764130 | 263 +++
 test/expected/LWN/0000764130.header | 10 +
 test/expected/LWN/0000764130.header.html | 20 +
 test/expected/LWN/0000764130.html | 25 +
 test/expected/LWN/0000764131 | 13 +
 test/expected/LWN/0000764131.header | 14 +
 test/expected/LWN/0000764131.header.html | 20 +
 test/expected/LWN/0000764131.html | 25 +
 test/expected/LWN/0000764182 | 92 +
 test/expected/LWN/0000764182.header | 7 +
 test/expected/LWN/0000764182.header.html | 20 +
 test/expected/LWN/0000764182.html | 25 +
 test/expected/LWN/0000764184 | 26 +
 test/expected/LWN/0000764184.header | 7 +
 test/expected/LWN/0000764184.header.html | 20 +
 test/expected/LWN/0000764184.html | 25 +
 test/expected/LWN/0000764200 | 13 +
 test/expected/LWN/0000764200.header | 13 +
 test/expected/LWN/0000764200.header.html | 20 +
 test/expected/LWN/0000764200.html | 25 +
 test/expected/LWN/0000764202 | 23 +
 test/expected/LWN/0000764202.header | 12 +
 test/expected/LWN/0000764202.header.html | 20 +
 test/expected/LWN/0000764202.html | 25 +
 test/expected/LWN/0000764209 | 13 +
 test/expected/LWN/0000764209.header | 15 +
 test/expected/LWN/0000764209.header.html | 20 +
 test/expected/LWN/0000764209.html | 25 +
 test/expected/LWN/0000764219 | 161 ++
 test/expected/LWN/0000764219.header | 11 +
 test/expected/LWN/0000764219.header.html | 20 +
 test/expected/LWN/0000764219.html | 25 +
 test/expected/LWN/0000764300 | 312 +++
 test/expected/LWN/0000764300.header | 10 +
 test/expected/LWN/0000764300.header.html | 20 +
 test/expected/LWN/0000764300.html | 25 +
 test/expected/LWN/0000764321 | 88 +
 test/expected/LWN/0000764321.header | 16 +
 test/expected/LWN/0000764321.header.html | 20 +
 test/expected/LWN/0000764321.html | 50 +
 test/source/LWN/Articles/763252.html | 1864 ++++++++++++++++
 test/source/LWN/Articles/763987.html | 136 ++
 test/source/LWN/Articles/764046.html | 329 +++
 test/source/LWN/Articles/764055.html | 242 ++
 test/source/LWN/Articles/764130.html | 234 ++
 test/source/LWN/Articles/764182.html | 164 ++
 test/source/LWN/Articles/764184.html | 135 ++
 test/source/LWN/Articles/764202.html | 142 ++
 test/source/LWN/Articles/764219.html | 217 ++
 test/source/LWN/Articles/764300.html | 259 +++
 test/source/LWN/Articles/764321.html | 216 ++
 test/source/LWN/index.html | 502 +++++
 96 files changed, 9421 insertions(+), 5 deletions(-)
 rename libs/{nikiroo-utils-4.4.2-sources.jar => nikiroo-utils-4.4.3-sources.jar} (98%)
 create mode 100644 test/expected/LWN/0000763252
 create mode 100644 test/expected/LWN/0000763252.header
 create mode 100644 test/expected/LWN/0000763252.header.html
 create mode 100644 test/expected/LWN/0000763252.html
 create mode 100644 test/expected/LWN/0000763603
 create mode 100644 test/expected/LWN/0000763603.header
 create mode 100644 test/expected/LWN/0000763603.header.html
 create mode 100644 test/expected/LWN/0000763603.html
 create mode 100644 test/expected/LWN/0000763729
 create mode 100644 test/expected/LWN/0000763729.header
 create mode 100644 test/expected/LWN/0000763729.header.html
 create mode 100644 test/expected/LWN/0000763729.html
 create mode 100644 test/expected/LWN/0000763789
 create mode 100644 test/expected/LWN/0000763789.header
 create mode 100644 test/expected/LWN/0000763789.header.html
 create mode 100644 test/expected/LWN/0000763789.html
 create mode 100644 test/expected/LWN/0000763987
 create mode 100644 test/expected/LWN/0000763987.header
 create mode 100644 test/expected/LWN/0000763987.header.html
 create mode 100644 test/expected/LWN/0000763987.html
 create mode 100644 test/expected/LWN/0000764001
 create mode 100644 test/expected/LWN/0000764001.header
 create mode 100644 test/expected/LWN/0000764001.header.html
 create mode 100644 test/expected/LWN/0000764001.html
 create mode 100644 test/expected/LWN/0000764046
 create mode 100644 test/expected/LWN/0000764046.header
 create mode 100644 test/expected/LWN/0000764046.header.html
 create mode 100644 test/expected/LWN/0000764046.html
 create mode 100644 test/expected/LWN/0000764048
 create mode 100644 test/expected/LWN/0000764048.header
 create mode 100644 test/expected/LWN/0000764048.header.html
 create mode 100644 test/expected/LWN/0000764048.html
 create mode 100644 test/expected/LWN/0000764055
 create mode 100644 test/expected/LWN/0000764055.header
 create mode 100644 test/expected/LWN/0000764055.header.html
 create mode 100644 test/expected/LWN/0000764055.html
 create mode 100644 test/expected/LWN/0000764057
 create mode 100644 test/expected/LWN/0000764057.header
 create mode 100644 test/expected/LWN/0000764057.header.html
 create mode 100644 test/expected/LWN/0000764057.html
 create mode 100644 test/expected/LWN/0000764130
 create mode 100644 test/expected/LWN/0000764130.header
 create mode 100644 test/expected/LWN/0000764130.header.html
 create mode 100644 test/expected/LWN/0000764130.html
 create mode 100644 test/expected/LWN/0000764131
 create mode 100644 test/expected/LWN/0000764131.header
 create mode 100644 test/expected/LWN/0000764131.header.html
 create mode 100644 test/expected/LWN/0000764131.html
 create mode 100644 test/expected/LWN/0000764182
 create mode 100644 test/expected/LWN/0000764182.header
 create mode 100644 test/expected/LWN/0000764182.header.html
 create mode 100644 test/expected/LWN/0000764182.html
 create mode 100644 test/expected/LWN/0000764184
 create mode 100644 test/expected/LWN/0000764184.header
 create mode 100644 test/expected/LWN/0000764184.header.html
 create mode 100644 test/expected/LWN/0000764184.html
 create mode 100644 test/expected/LWN/0000764200
 create mode 100644 test/expected/LWN/0000764200.header
 create mode 100644 test/expected/LWN/0000764200.header.html
 create mode 100644 test/expected/LWN/0000764200.html
 create mode 100644 test/expected/LWN/0000764202
 create mode 100644 test/expected/LWN/0000764202.header
 create mode 100644 test/expected/LWN/0000764202.header.html
 create mode 100644 test/expected/LWN/0000764202.html
 create mode 100644 test/expected/LWN/0000764209
 create mode 100644 test/expected/LWN/0000764209.header
 create mode 100644 test/expected/LWN/0000764209.header.html
 create mode 100644 test/expected/LWN/0000764209.html
 create mode 100644 test/expected/LWN/0000764219
 create mode 100644 test/expected/LWN/0000764219.header
 create mode 100644 test/expected/LWN/0000764219.header.html
 create mode 100644 test/expected/LWN/0000764219.html
 create mode 100644 test/expected/LWN/0000764300
 create mode 100644 test/expected/LWN/0000764300.header
 create mode 100644 test/expected/LWN/0000764300.header.html
 create mode 100644 test/expected/LWN/0000764300.html
 create mode 100644 test/expected/LWN/0000764321
 create mode 100644 test/expected/LWN/0000764321.header
 create mode 100644 test/expected/LWN/0000764321.header.html
 create mode 100644 test/expected/LWN/0000764321.html
 create mode 100644 test/source/LWN/Articles/763252.html
 create mode 100644 test/source/LWN/Articles/763987.html
 create mode 100644 test/source/LWN/Articles/764046.html
 create mode 100644 test/source/LWN/Articles/764055.html
 create mode 100644 test/source/LWN/Articles/764130.html
 create mode 100644 test/source/LWN/Articles/764182.html
 create mode 100644 test/source/LWN/Articles/764184.html
 create mode 100644 test/source/LWN/Articles/764202.html
 create mode 100644 test/source/LWN/Articles/764219.html
 create mode 100644 test/source/LWN/Articles/764300.html
 create mode 100644 test/source/LWN/Articles/764321.html
 create mode 100644 test/source/LWN/index.html

diff --git a/changelog.md b/changelog.md
index 730c6fd..38e5285 100644
--- a/changelog.md
+++ b/changelog.md
@@ -7,6 +7,7 @@
 - Fix html output for LWN (do not allow raw text!)
 - Allow  in all supports
 - Lot of other small fixes
+- New tests based upon static "example" content (not dynamic)
 
 ## Version 1.2.0
 
diff --git a/libs/nikiroo-utils-4.4.2-sources.jar b/libs/nikiroo-utils-4.4.3-sources.jar
similarity index 98%
rename from libs/nikiroo-utils-4.4.2-sources.jar
rename to libs/nikiroo-utils-4.4.3-sources.jar
index a2869e605140a1bc1f7da7108456ecec03a024fc..3385280388d78ebc6af7382814a586f341adc37d 100644
GIT binary patch
delta 3349
zcmYM0XIN8t6U8C9H0ix}(HM{_B^2o$R;m;eic}#;6@dUr=v@|p3n)#B0i^e$v=wos
zu1H6EQv^YQC<<>}_VN96@9&&5|7T`CO+hM4MJfxOnK2Y#BcrCKCexAnlTHPqBZW7N
zH6)KUgftsM(=>qtNkvo8NeK`JKg&WXuvTt?LE6rNZjxTk!Oov}QGs)9`*x@7(kl10
z#bh6L=6ZWPRzf&lSI$*7d}>(3=p!gZ)8J$YFp_=Rl04mOrIj$mJ#p2w4$DtbOB7?7
zei^w3u?52eUXN1nCO=CDEyS~|Z*E*_?3>o7q6#pF`^->uujE?I80RMIj~hSE$L4zt
zgXJDeCL-BcQ?;QpX9^f|+vn&hl<>!QbBsh9If41k`#rn$mPd0B+9f1sFhWFb#g(`j
zL@TCP^9(6xkEMGt63SyE2N3|yhR
zxy~?dvYn{Zw?CxT>B#I1Ek!RGBJNS=W#te(b!Fr~%^Iv0O&lcM>P=baXv;CYC}Q&x
z0rh
zm?&5K497i25v?V^2|i|%UQS3wP4hmTkW*%(7P6L04>pzI{laf)7eTBin33I~)ZEYs
zNxedU^Z4s*`!(+CQG9BqX|t0CrOvF@ICE=O+V2-X25ks?pex``XP}Xney8%m8U+(K
z##+g0vKS(w$B-ndQ&8oNZIE;3ho)sxNWr;oHHdGUJ`+t>@7+<99;c<8nfN;?k;01p
z=~c`f3s}4&cW@i#GTOQ&!1!_A6j`p)W{`Xo(jcT1+Iu)|h2pHrbQsnB96Ull#^8HxQN5W1O9)}mL2LVYCJ6KM
ztW_uLjqbx;^EMrtOVy*cnup|*(ZNhXQXVc963@oCZo*x=2EqR8Mw-VJ<~#OEZwiMH
z2}ztqT^solrLhSYvFs-HnHN21>$zz8uVP;cqf-O%f2`K^sizUF>XC#C1<6{Tf;I8p
z)p>s|`>wx=#wDg{3dkluWEyMn%(>a2acu4K<66
zmQ-#Qsw#j@Xf`|QE~VCfJGXAu<^oeaUe?$1`aj3HJop&7qalmSXY&J&`3o;p3xT&E
z@lS+P{^ob|0un5YWPPtc&ZqBgoA1leK(jsJ4PY+4MSW2Ug}oP)Q+Kk{3zdLU3LsQe
z{a4&?*d6Ra;*`$}rC9TZ&HThb
zd_B1ko^{6d3~%cfjnEq0#f2s1-Z$%VspXt57U+>mA-Jn@p_z-@(6l3|-wgUNiMf~{cimaLTPj@QQ
zA8VQ?e=YLQ7E(#y0C4PTeXxte77SGExcGUoREvv|D6W?y;63YSm!_CocXa|5Oe
zp0*?KK~VLIesutozHr*W8_^#@uuER#3b0i8DGt6$OI!wz7}m`n~CpLpob}GDAl;UgbhkE
zJnG$GQrts2&=B-BYRZ$v--&E;4A{LU4=aBc<(xw`BGV@RTt_atHcLUtySV&zymzsb
zZ>8jR*T%XFqrTmux@ss(QsC6^L8UI0feZFa&57x0JTlcbu)f92W+WQwI8iiOz4XFsBQWZ@cf
z1nc2(K}R&z^y`WlsgsvPwx9~%3R!9NvwIC`G_R}7%HAF6Q
z@~;&nL{8{9B`DW@fr+XZ#qoQ{7a)Uv}l$(2PBvi9)=|wWcVOI^mR^PDcYp=01}_$-&L%$!p)l}mHfqKruv
zI=v*Fr`SjZl^!!oDK1uc6TH!i4cIB<&yW-P(;oGXrEH5iqWFgFjNaDO@yP>Hv1x>X0+V|Cn)fXI1i*nX>@b4eQSP!SxX->NCgplj*XWs{YTrTkE@!NW($Txoflr4%F=ypE>
zw=?3ab{I)~YP4ZV;2rq7ojm0!p+pc9ZeyKVWy<|R);B&OzeU05%}JmNwbLp$=0``U
ze(F={G8ksMz;_T;WcvvcO_wod)6u^x&Gnh$eY@5i>(M6rY)YL+q{NT-ZnKuHCnF6K
z74by|&*>~QyqV?GtsT-UrpAVQ_soo`sIw#fFh8XsBTJ5FFoCu*m;ewa(p3qvAOQsF
z+iHpgMtJFeot3|XAS?v94dF8ZmqP$9P}c;?47P^>bYO1?AV59>E`+)<22h6ZrJaLD8k819gp=&4v2&$69H0hqzZ8b?
z--;r{=>I?uV#c9NAS{BEq5tQ1h8pyZ__zDI2;c_!tL#X?075nkl1BpkpyVv6AP+~H
zq0SEg!1zdj2eQ3(?)5|ha*$`+=PUd(TTzB={2wSl#TY;bG9&q~oh=10gNgy9!JA?L
z9&&wfGzKsyM}fkzfElDi{rriufM0thZyy^4%>)hA(zRq>-bN)J|$;jMfB!`Is0P-@hu&^*Fo=oG=fGeJi
z)+6h|$a?U4Js?F3-X#uAX#r|@;l_=J|MHCmrK?R-rd}CN3r;;R`dKtzdMm_JBg$i!
zAw8dECA5h_^_5}8LGF9TH7-?A=|@kpQglUad0}5&UWC9Z49+(%ED@7+Ec*>g9}LLI
z2HdlD{#rx=nx!5zd?h+=7?yo~GSpM#fv|FaYP0!d<)P}mcsIA7$_l%>wRjYHaxi$7
z@3}+U9wD!W+SwMt{8d!L?ZsZ^AT_;>nV9D%?wy$vFnFq!!K1jGXm6mwZI!c8mN`Uj
ze}#bU@1|J=SDo!@o7*J;=g(PgCb+pNk1WO*`Fk7IGVZQA5oWj*M_H}
z#3T#r%=lC)lw1ZXTrTIL@EG^(VO5;(07KSTQ%q-7He&dGZoGK*=(5IMJd5x#H$Znl
z`nX513$=)^hf)_Io=h?ul=WsrXV}iUoJxzg<6Gj$ml(zKPj^gnSLfCq*?upG<8wON
zm%}*`Vrf8lO%%3BB1R>Z5!R2dswi4W2ihonGf#E;blsMMjmN<8FHHj$oY~qDxS+~p
zcLrE(HXk*-NAY5FhTSG(^y?pUsXAmh4$Z3cBj#=9gj0xab&kQkk5+e!iV{k^lC_1-
znKBAkC_l?L_Q|;>>ff7D2H(b)K0Y-)nI^74=zmvEG_k(OsGY=;D6XAbe$&VOB@)mc
zAfot&PES1uJoSbL-=F8SC46-mB;E3K>*gF$~gw88ch&eZdLb3Xao|N7n-*1nS)%s&O$%*Up_laX^<%tZl~QFG
zT5LS$eYHygY3-vF46ey#R4(Y-sMoK_snx;F73XlQ$=R44|Go!8utJLDLlv=+kyuAN
zZ)d$@=h)JUrHFy-(F+n5x^b*6qoSA&C8xQ&wOvP$DiQdBbUaFRwJ+5=Jt=d!?BkT3
zp?r>0dv)!HnGHtmXyHrJNBmF6dHU;Msjhkjg^pBjE~1G;0wU4;=rYL<4eNF<&FA7K
zu(~8=4{JN>`_(V7Q>B9J&ef%0W^(Ty9|$CQp12~lZc2Dn^e(I9%v>9PZ^uTF)jz7-
zY6N~KFEsu^@zBl$c^`MoF?qXHmt1(`Y*UfGmd%Z6MLczHdB*4Fr&f2KN1acNE9@iQ
zRbeZb(NBjdH0<|Db#Elno;LqfrRi5QA(B&~Mfk@YC#N`(spb2OvwE;@0;iPg%2uWC
zRiCLx${41;REP3DVA4v|a(U=oVr+Z(&Mmero)kQqVIY2?vG(#%7tftu!015bTl<~U
zGd&rc;d|Lvc*Ngl753>b5>bxLK4J2JGDTd&gDS~9ExEr&*+1*M+%hq47yixCs`4lDF{g(m^ncA0_gMHhUz^SFMoQG|!2P3qG%@(YT+T~Wu#f3nP
zlJ|JA{Pn4YwV&Uo26UfcZ~uGJBoOHSXyASrmlmjqD)=U{1z)E
zFE*>TBJBm0Q7t<_q(p2z?oGY-=vwVUCf|S(3Du)RZlr{gWh<{{cIR0K>35>K>TO!G33!KShSwkC*Dvig{^#@r)^Z
zjn<7T8_ZV(9Xq&R)Zz0(Nuk<~nvF64d}F%S*wkpeH`~*9tVnKOxtY
zGn8ukc22JJW@6Cpc{e@RDAx0D3Kmaw74r%fVA*PV1#HDCzxeJTpX3otHD@SGXIc2h
ze91y+6R+l!hNP_8nv}tk#7!E42Y3B`!|>}6=4SCiv3f~G?W1z3W8RcWsq1)WUhmIi
z+ZOl6EvwczJO{O|jeK8>5xuhhHvg}937-22qL*3>SR8j9aF+ms4qGMR!V<@%6L_}r
z>DSLp!`ymikK>Am?;wAuxc*36nlO!o30)m(K1QjT(n`z^3K!b^?fzun)FJXtz2
zpkBzqgw1NcebQLIsrizHb2aJJMJOW5Go
zI;AmPlkN>#kDY0VA`{6io^gW)zf$%kGt`_}l&Vv4P??R`$CnqbaB+!z#J}7;@9|B`
zh*^$1p$yeBxkXmuLrLCZDnyFXx1DYfo*3J0hU==PeBt+*ggei&W;Hwt#$gj!Hn}Rh
zx{k;{rny(RUX*%{R)T%Cm9R}@*gvIc_Vx}lP}62|v4HbjR_~xK>y`qShhV6+wf$tF
zzv=Ox-}!NY^WXR*3)xDh@k0IKA5S+VX5g5z96C~I@IKt%+X*_T>n*+Qx@mHrUqtGT
zsyL7|^PNaToQd4Rf!-0Bw3wM=CRLTY_I^S?2$Tw^BNMmF!}Ua-#Q!*0joH3DrGk)G
z2@be5+xl@OMMDPjxir&n?5Cz3nv$S!6upj8owPuK#dIji27z_U|s)tt#37OjGaY|K(p@-P@s<9!0BXPpdi~ndaxn
zj*moJSj)`5(cJHH>?JzP>3q?xwElPGpjftv_HrqMsi=k&niKur#ZhtZ1a&8K?~Cwe
zUTDvaj)JUN-}w(ad7SuM&9cHTyH0uLhCnw)!<&UTAWN0B#uZnUBV(wK~PfBzUr*I`HX=H)@^nL-WO%
zBil=S(@8b%-urPce%>0_$#JtbGu9do6MRf~bHejjdG;~$rv+IE(k#rxfSDzf3EjNO
z%D`X~Cuj&X3L1iN1pUhhiP(W^^tJJ-9XKM)*(I)G^nW9S4h3BR9zy^cvjtF)i6OuP
zb%cU4K$qzs1cuQ!Z(Bl|VW1g+wEaWN!@vtb$)S>k!b3m~NFp2*1-h^OG1!NL8o-G6
zAI0edAP-$*bQi1+qJ@J-FeHSH0QCV>GyrKv1IMBA2>QV`cL4hT2L;h0KxKd@1%T92
z01-$#5|n`rK|Yb-WjHVn`9^}m&_pDt0N4B@M591WIDF!;6CU;7bd&VMCjoJ2E((-^
zw@zi?K@mv$Hoe|DtktFg1k@c4BB1o!pb-4S!f(gmZ4eDBfDUeh?(mJTzY*QY4&B9r
z7EI>+

diff --git a/src/be/nikiroo/gofetch/test/TestBase.java b/src/be/nikiroo/gofetch/test/TestBase.java
index e8bfde0..1da6b48 100644
--- a/src/be/nikiroo/gofetch/test/TestBase.java
+++ b/src/be/nikiroo/gofetch/test/TestBase.java
@@ -19,6 +19,13 @@ import be.nikiroo.utils.test.TestLauncher;
 
 /**
  * Base class for {@link BasicSupport}s testing.
+ * <p>
+ * It will use the paths:
+ * <ul>
+ * <li>test/XXX/source: the html source files</li>
+ * <li>test/XXX/expected: the expected output</li>
+ * <li>test/XXX/actual: the actual output of the last test</li>
+ * </ul>
 * 
 * @author niki
 */
@@ -28,15 +35,16 @@ abstract class TestBase extends TestLauncher {
 		addTest(support);
 	}
 
-	static protected InputStream doOpen(Map<URL, File> map, URL url)
-			throws IOException {
+	static protected InputStream doOpen(BasicSupport support,
+			Map<URL, File> map, URL url) throws IOException {
 		File file = map.get(url);
 		if (file == null) {
 			throw new FileNotFoundException("Test file not found for URL: "
 					+ url);
 		}
 
-		return new FileInputStream(file);
+		return new FileInputStream("test/source/" + support.getType() + "/"
+				+ file);
 	}
 
@@ -47,10 +55,15 @@ abstract class TestBase extends TestLauncher {
 		File expected = new File("test/expected/" + support.getType());
 		File actual = new File("test/result/" + support.getType());
 
+		IOUtils.deltree(actual);
+		expected.mkdirs();
+		actual.mkdirs();
+
 		Output gopher = new Gopher(support.getType(), "", "", 70);
 		Output html = new Html(support.getType(), "", "", 80);
 
 		for (Story story : support.list()) {
+			support.fetch(story);
 			IOUtils.writeSmallFile(new File(actual, story.getId() + ".header"),
 					gopher.exportHeader(story));
 			IOUtils.writeSmallFile(
diff --git a/src/be/nikiroo/gofetch/test/TestLWN.java b/src/be/nikiroo/gofetch/test/TestLWN.java
index 597e761..1025813 100644
--- a/src/be/nikiroo/gofetch/test/TestLWN.java
+++ b/src/be/nikiroo/gofetch/test/TestLWN.java
@@ -15,7 +15,32 @@ public class TestLWN extends TestBase {
 	static private Map<URL, File> getMap() throws MalformedURLException {
 		Map<URL, File> map = new HashMap<URL, File>();
 
-		map.put(new URL("http://fanfan.be/"), new File("/tmp/none"));
+
+		map.put(new URL("https://lwn.net/"), new File("index.html"));
+
+		map.put(new URL("https://lwn.net/Articles/763252/"), new File(
+				"Articles/763252.html"));
+		map.put(new URL("https://lwn.net/Articles/763987/"), new File(
+				"Articles/763987.html"));
+		map.put(new URL("https://lwn.net/Articles/764046"), new File(
+				"Articles/764046.html"));
+		map.put(new URL("https://lwn.net/Articles/764055"), new File(
+				"Articles/764055.html"));
+		map.put(new URL("https://lwn.net/Articles/764130"), new File(
+				"Articles/764130.html"));
+		map.put(new URL("https://lwn.net/Articles/764182"), new File(
+				"Articles/764182.html"));
+		map.put(new URL("https://lwn.net/Articles/764184/"), new File(
+				"Articles/764184.html"));
+		map.put(new URL("https://lwn.net/Articles/764202/"), new File(
+				"Articles/764202.html"));
+		map.put(new URL("https://lwn.net/Articles/764219"), new File(
+				"Articles/764219.html"));
+		map.put(new URL("https://lwn.net/Articles/764300"), new File(
+				"Articles/764300.html"));
+		map.put(new URL("https://lwn.net/Articles/764321/"), new File(
+				"Articles/764321.html"));
+
 		return map;
 	}
 
@@ -23,7 +48,7 @@ public class TestLWN extends TestBase {
 		super(new LWN() {
 			@Override
 			protected InputStream open(URL url) throws IOException {
-				return doOpen(getMap(), url);
+				return doOpen(this, getMap(), url);
 			}
 
 			@Override
diff --git a/test/expected/LWN/0000763252 b/test/expected/LWN/0000763252
new file mode 100644
index 0000000..f5a2048
--- /dev/null
+++ b/test/expected/LWN/0000763252
@@ -0,0 +1,1957 @@
+          LWN.NET WEEKLY EDITION FOR AUGUST 30, 2018
+
+
+
+   o News link: https://lwn.net/Articles/763252/
+   o Source link:
+
+
+   [1]Welcome to the LWN.net Weekly Edition for August 30, 2018
+   This edition contains the following feature content:
+
+   [2]An introduction to the Julia language, part 1 : Julia is a
+   language designed for intensive numerical calculations; this
+   article gives an overview of its core features.
+ + [3]C considered dangerous : a Linux Security Summit talk on + what is being done to make the use of C in the kernel safer. + + [4]The second half of the 4.19 merge window : the final + features merged (or not merged) before the merge window closed + for this cycle. + + [5]Measuring (and fixing) I/O-controller throughput loss : the + kernel's I/O controllers can provide useful bandwidth + guarantees, but at a significant cost in throughput. + + [6]KDE's onboarding initiative, one year later : what has gone + right in KDE's effort to make it easier for contributors to + join the project, and what remains to be done. + + [7]Sharing and archiving data sets with Dat : an innovative + approach to addressing and sharing data on the net. + + This week's edition also includes these inner pages: + + [8]Brief items : Brief news items from throughout the + community. + + [9]Announcements : Newsletters, conferences, security updates, + patches, and more. + + Please enjoy this week's edition, and, as always, thank you + for supporting LWN.net. + + [10]Comments (none posted) + + [11]An introduction to the Julia language, part 1 + + August 28, 2018 + + This article was contributed by Lee Phillips + + [12]Julia is a young computer language aimed at serving the + needs of scientists, engineers, and other practitioners of + numerically intensive programming. It was first publicly + released in 2012. After an intense period of language + development, version 1.0 was [13]released on August 8. The 1.0 + release promises years of language stability; users can be + confident that developments in the 1.x series will not break + their code. This is the first part of a two-part article + introducing the world of Julia. This part will introduce + enough of the language syntax and constructs to allow you to + begin to write simple programs. The following installment will + acquaint you with the additional pieces needed to create real + projects, and to make use of Julia's ecosystem. + + Goals and history + + The Julia project has ambitious goals. It wants the language + to perform about as well as Fortran or C when running + numerical algorithms, while remaining as pleasant to program + in as Python. I believe the project has met these goals and is + poised to see increasing adoption by numerical researchers, + especially now that an official, stable release is available. + + The Julia project maintains a [14]micro-benchmark page that + compares its numerical performance against both statically + compiled languages (C, Fortran) and dynamically typed + languages (R, Python). While it's certainly possible to argue + about the relevance and fairness of particular benchmarks, the + data overall supports the Julia team's contention that Julia + has generally achieved parity with Fortran and C; the + benchmark source code is available. + + Julia began as research in computer science at MIT; its + creators are Alan Edelman, Stefan Karpinski, Jeff Bezanson, + and Viral Shah. These four remain active developers of the + language. They, along with Keno Fischer, co-founder and CTO of + [15]Julia Computing , were kind enough to share their thoughts + with us about the language. I'll be drawing on their comments + later on; for now, let's get a taste of what Julia code looks + like. + + Getting started + + To explore Julia initially, start up its standard + [16]read-eval-print loop (REPL) by typing julia at the + terminal, assuming that you have installed it. 
You will then + be able to interact with what will seem to be an interpreted + language — but, behind the scenes, those commands are being + compiled by a just-in-time (JIT) compiler that uses the + [17]LLVM compiler framework . This allows Julia to be + interactive, while turning the code into fast, native machine + instructions. However, the JIT compiler passes sometimes + introduce noticeable delays at the REPL, especially when using + a function for the first time. + + To run a Julia program non-interactively, execute a command + like: $ julia script.jl + + Julia has all the usual data structures: numbers of various + types (including complex and rational numbers), + multidimensional arrays, dictionaries, strings, and + characters. Functions are first-class: they can be passed as + arguments to other functions, can be members of arrays, and so + on. + + Julia embraces Unicode. Strings, which are enclosed in double + quotes, are arrays of Unicode characters, which are enclosed + in single quotes. The " * " operator is used for string and + character concatenation. Thus 'a' and 'β' are characters, and + 'aβ' is a syntax error. "a" and "β" are strings, as are "aβ", + 'a' * 'β', and "a" * "β" — all evaluate to the same string. + + Variable and function names can contain non-ASCII characters. + This, along with Julia's clever syntax that understands + numbers prepended to variables to mean multiplication, goes a + long way to allowing the numerical scientist to write code + that more closely resembles the compact mathematical notation + of the equations that usually lie behind it. julia ε₁ = 0.01 + + 0.01 + + julia ε₂ = 0.02 + + 0.02 + + julia 2ε₁ + 3ε₂ + + 0.08 + + And where does Julia come down on the age-old debate of what + do about 1/2 ? In Fortran and Python 2, this will get you 0, + since 1 and 2 are integers, and the result is rounded down to + the integer 0. This was deemed inconsistent, and confusing to + some, so it was changed in Python 3 to return 0.5 — which is + what you get in Julia, too. + + While we're on the subject of fractions, Julia can handle + rational numbers, with a special syntax: 3//5 + 2//3 returns + 19//15 , while 3/5 + 2/3 gets you the floating-point answer + 1.2666666666666666. Internally, Julia thinks of a rational + number in its reduced form, so the expression 6//8 == 3//4 + returns true , and numerator(6//8) returns 3 . + + Arrays + + Arrays are enclosed in square brackets and indexed with an + iterator that can contain a step value: julia a = [1, 2, 3, + 4, 5, 6] + + 6-element Array{Int64,1}: + + 1 + + 2 + + 3 + + 4 + + 5 + + 6 + + julia a[1:2:end] + + 3-element Array{Int64,1}: + + 1 + + 3 + + 5 + + As you can see, indexing starts at one, and the useful end + index means the obvious thing. When you define a variable in + the REPL, Julia replies with the type and value of the + assigned data; you can suppress this output by ending your + input line with a semicolon. + + Since arrays are such a vital part of numerical computation, + and Julia makes them easy to work with, we'll spend a bit more + time with them than the other data structures. 
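As a runnable recap of the constructs just described (literal numeric coefficients, integer division, rational numbers, and 1-D array indexing), here is a small sketch against Julia 1.0; it is illustrative only, not an excerpt from the article, with the expected results shown in comments:

    ε₁ = 0.01
    ε₂ = 0.02
    2ε₁ + 3ε₂               # 0.08   -- a literal coefficient means multiplication
    1 / 2                   # 0.5    -- integer division promotes to floating point
    3//5 + 2//3             # 19//15 -- rational arithmetic stays exact
    numerator(6//8)         # 3      -- rationals are stored in reduced form
    "a" * "β"               # "aβ"   -- strings concatenate with *
    a = [1, 2, 3, 4, 5, 6]
    a[1:2:end]              # [1, 3, 5] -- indexing starts at 1, end is the last index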
+ + To illustrate the syntax, we can start with a couple of 2D + arrays, defined at the REPL: julia a = [1 2 3; 4 5 6] + + 2×3 Array{Int64,2}: + + 1 2 3 + + 4 5 6 + + julia z = [-1 -2 -3; -4 -5 -6]; + + Indexing is as expected: julia a[1, 2] + + 2 + + You can glue arrays together horizontally: julia [a z] + + 2×6 Array{Int64,2}: + + 1 2 3 -1 -2 -3 + + 4 5 6 -4 -5 -6 + + And vertically: julia [a; z] + + 4×3 Array{Int64,2}: + + 1 2 3 + + 4 5 6 + + -1 -2 -3 + + -4 -5 -6 + + Julia has all the usual operators for handling arrays, and + [18]linear algebra functions that work with matrices (2D + arrays). The linear algebra functions are part of Julia's + standard library, but need to be imported with a command like + " using LinearAlgebra ", which is a detail omitted from the + current documentation. The functions include such things as + determinants, matrix inverses, eigenvalues and eigenvectors, + many kinds of matrix factorizations, etc. Julia has not + reinvented the wheel here, but wisely uses the [19]LAPACK + Fortran library of battle-tested linear algebra routines. + + The extension of arithmetic operators to arrays is usually + intuitive: julia a + z + + 2×3 Array{Int64,2}: + + 0 0 0 + + 0 0 0 + + And the numerical prepending syntax works with arrays, too: + julia 3a + 4z + + 2×3 Array{Int64,2}: + + -1 -2 -3 + + -4 -5 -6 + + Putting a multiplication operator between two matrices gets + you matrix multiplication: julia a * transpose(a) + + 2×2 Array{Int64,2}: + + 14 32 + + 32 77 + + You can "broadcast" numbers to cover all the elements in an + array by prepending the usual arithmetic operators with a dot: + julia 1 .+ a + + 2×3 Array{Int64,2}: + + 2 3 4 + + 5 6 7 + + Note that the language only actually requires the dot for some + operators, but not for others, such as "*" and "/". The + reasons for this are arcane, and it probably makes sense to be + consistent and use the dot whenever you intend broadcasting. + Note also that the current version of the official + documentation is incorrect in claiming that you may omit the + dot from "+" and "-"; in fact, this now gives an error. + + You can use the dot notation to turn any function into one + that operates on each element of an array: julia + round.(sin.([0, π/2, π, 3π/2, 2π])) + + 5-element Array{Float64,1}: + + 0.0 + + 1.0 + + 0.0 + + -1.0 + + -0.0 + + The example above illustrates chaining two dotted functions + together. The Julia compiler turns expressions like this into + "fused" operations: instead of applying each function in turn + to create a new array that is passed to the next function, the + compiler combines the functions into a single compound + function that is applied once over the array, creating a + significant optimization. + + You can use this dot notation with any function, including + your own, to turn it into a version that operates element-wise + over arrays. + + Dictionaries (associative arrays) can be defined with several + syntaxes. Here's one: julia d1 = Dict("A"=1, "B"=2) + + Dict{String,Int64} with 2 entries: + + "B" = 2 + + "A" = 1 + + You may have noticed that the code snippets so far have not + included any type declarations. Every value in Julia has a + type, but the compiler will infer types if they are not + specified. It is generally not necessary to declare types for + performance, but type declarations sometimes serve other + purposes, that we'll return to later. Julia has a deep and + sophisticated type system, including user-defined types and + C-like structs. 
Types can have behaviors associated with them, + and can inherit behaviors from other types. The best thing + about Julia's type system is that you can ignore it entirely, + use just a few pieces of it, or spend weeks studying its + design. + + Control flow + + Julia code is organized in blocks, which can indicate control + flow, function definitions, and other code units. Blocks are + terminated with the end keyword, and indentation is not + significant. Statements are separated either with newlines or + semicolons. + + Julia has the typical control flow constructs; here is a while + block: julia i = 1; + + julia while i 5 + + print(i) + + global i = i + 1 + + end + + 1234 + + Notice the global keyword. Most blocks in Julia introduce a + local scope for variables; without this keyword here, we would + get an error about an undefined variable. + + Julia has the usual if statements and for loops that use the + same iterators that we introduced above for array indexing. We + can also iterate over collections: julia for i ∈ ['a', 'b', + 'c'] + + println(i) + + end + + a + + b + + c + + In place of the fancy math symbol in this for loop, we can use + " = " or " in ". If you want to use the math symbol but have + no convenient way to type it, the REPL will help you: type " + \in " and the TAB key, and the symbol appears; you can type + many [20]LaTeX expressions into the REPL in this way. + + Development of Julia + + The language is developed on GitHub, with over 700 + contributors. The Julia team mentioned in their email to us + that the decision to use GitHub has been particularly good for + Julia, as it streamlined the process for many of their + contributors, who are scientists or domain experts in various + fields, rather than professional software developers. + + The creators of Julia have [21]published [PDF] a detailed + “mission statement” for the language, describing their aims + and motivations. A key issue that they wanted their language + to solve is what they called the "two-language problem." This + situation is familiar to anyone who has used Python or another + dynamic language on a demanding numerical problem. To get good + performance, you will wind up rewriting the numerically + intensive parts of the program in C or Fortran, dealing with + the interface between the two languages, and may still be + disappointed in the overhead presented by calling the foreign + routines from your original code. + + For Python, [22]NumPy and SciPy wrap many numerical routines, + written in Fortran or C, for efficient use from that language, + but you can only take advantage of this if your calculation + fits the pattern of an available routine; in more general + cases, where you will have to write a loop over your data, you + are stuck with Python's native performance, which is orders of + magnitude slower. If you switch to an alternative, faster + implementation of Python, such as [23]PyPy , the numerical + libraries may not be compatible; NumPy became available for + PyPy only within about the past year. + + Julia solves the two-language problem by being as expressive + and simple to program in as a dynamic scripting language, + while having the native performance of a static, compiled + language. There is no need to write numerical libraries in a + second language, but C or Fortran library routines can be + called using a facility that Julia has built-in. Other + languages, such as [24]Python or [25]R , can also interoperate + easily with Julia using external packages. 
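To make the built-in C-calling facility mentioned above concrete, here is a minimal sketch, assuming Julia 1.0 on a Unix-like system whose C library exports clock() and strlen(); these particular calls are illustrative and not taken from the article:

    # ccall takes the C function name, the return type, a tuple of
    # argument types, and then the arguments themselves.
    t = ccall(:clock, Int32, ())                       # CPU clock ticks from the C library
    n = ccall(:strlen, Csize_t, (Cstring,), "hello")   # 5

No wrapper code or separate build step is needed; the call is compiled down to a direct invocation of the C routine.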
+ + Documentation + + There are many resources to turn to to learn the language. + There is an extensive and detailed [26]manual at Julia + headquarters, and this may be a good place to start. However, + although the first few chapters provide a gentle introduction, + the material soon becomes dense and, at times, hard to follow, + with references to concepts that are not explained until later + chapters. Fortunately, there is a [27]"learning" link at the + top of the Julia home page, which takes you to a long list of + videos, tutorials, books, articles, and classes both about + Julia and that use Julia in teaching subjects such a numerical + analysis. There is also a fairly good [28]cheat-sheet [PDF] , + which was just updated for v. 1.0. + + If you're coming from Python, [29]this list of noteworthy + differences between Python and Julia syntax will probably be + useful. + + Some of the linked tutorials are in the form of [30]Jupyter + notebooks — indeed, the name "Jupyter" is formed from "Julia", + "Python", and "R", which are the three original languages + supported by the interface. The [31]Julia kernel for Jupyter + was recently upgraded to support v. 1.0. Judicious sampling of + a variety of documentation sources, combined with liberal + experimentation, may be the best way of learning the language. + Jupyter makes this experimentation more inviting for those who + enjoy the web-based interface, but the REPL that comes with + Julia helps a great deal in this regard by providing, for + instance, TAB completion and an extensive help system invoked + by simply pressing the "?" key. + + Stay tuned + + The [32]next installment in this two-part series will explain + how Julia is organized around the concept of "multiple + dispatch". You will learn how to create functions and make + elementary use of Julia's type system. We'll see how to + install packages and use modules, and how to make graphs. + Finally, Part 2 will briefly survey the important topics of + macros and distributed computing. + + [33]Comments (80 posted) + + [34]C considered dangerous + + By Jake Edge + + August 29, 2018 + + [35]LSS NA + + At the North America edition of the [36]2018 Linux Security + Summit (LSS NA), which was held in late August in Vancouver, + Canada, Kees Cook gave a presentation on some of the dangers + that come with programs written in C. In particular, of + course, the Linux kernel is mostly written in C, which means + that the security of our systems rests on a somewhat dangerous + foundation. But there are things that can be done to help firm + things up by " Making C Less Dangerous " as the title of his + talk suggested. + + He began with a brief summary of the work that he and others + are doing as part of the [37]Kernel Self Protection Project + (KSPP). The goal of the project is to get kernel protections + merged into the mainline. These protections are not targeted + at protecting user-space processes from other (possibly rogue) + processes, but are, instead, focused on protecting the kernel + from user-space code. There are around 12 organizations and + ten individuals working on roughly 20 different technologies + as part of the KSPP, he said. The progress has been "slow and + steady", he said, which is how he thinks it should go. [38] + + One of the main problems is that C is treated mostly like a + fancy assembler. The kernel developers do this because they + want the kernel to be as fast and as small as possible. 
There + are other reasons, too, such as the need to do + architecture-specific tasks that lack a C API (e.g. setting up + page tables, switching to 64-bit mode). + + But there is lots of undefined behavior in C. This + "operational baggage" can lead to various problems. In + addition, C has a weak standard library with multiple utility + functions that have various pitfalls. In C, the content of + uninitialized automatic variables is undefined, but in the + machine code that it gets translated to, the value is whatever + happened to be in that memory location before. In C, a + function pointer can be called even if the type of the pointer + does not match the type of the function being called—assembly + doesn't care, it just jumps to a location, he said. + + The APIs in the standard library are also bad in many cases. + He asked: why is there no argument to memcpy() to specify the + maximum destination length? He noted a recent [39]blog post + from Raph Levien entitled "With Undefined Behavior, Anything + is Possible". That obviously resonated with Cook, as he + pointed out his T-shirt—with the title and artwork from the + post. + + Less danger + + He then moved on to some things that kernel developers can do + (and are doing) to get away from some of the dangers of C. He + began with variable-length arrays (VLAs), which can be used to + overflow the stack to access data outside of its region. Even + if the stack has a guard page, VLAs can be used to jump past + it to write into other memory, which can then be used by some + other kind of attack. The C language is "perfectly fine with + this". It is easy to find uses of VLAs with the -Wvla flag, + however. + + But it turns out that VLAs are [40]not just bad from a + security perspective , they are also slow. In a + micro-benchmark associated with a [41]patch removing a VLA , a + 13% performance boost came from using a fixed-size array. He + dug in a bit further and found that much more code is being + generated to handle a VLA, which explains the speed increase. + Since Linus Torvalds has [42]declared that VLAs should be + removed from the kernel because they cause security problems + and also slow the kernel down; Cook said "don't use VLAs". + + Another problem area is switch statements, in particular where + there is no break for a case . That could mean that the + programmer expects and wants to fall through to the next case + or it could be that the break was simply forgotten. There is a + way to get a warning from the compiler for fall-throughs, but + there needs to be a way to mark those that are truly meant to + be that way. A special fall-through "statement" in the form of + a comment is what has been agreed on within the + static-analysis community. He and others have been going + through each of the places where there is no break to add + these comments (or a break ); they have "found a lot of bugs + this way", he said. + + Uninitialized local variables will generate a warning, but not + if the variable is passed in by reference. There are some GCC + plugins that will automatically initialize these variables, + but there are also patches for both GCC and Clang to provide a + compiler option to do so. Neither of those is upstream yet, + but Torvalds has praised the effort so the kernel would likely + use the option. An interesting side effect that came about + while investigating this was a warning he got about + unreachable code when he enabled the auto-initialization. 
+ There were two variables declared just after a switch (and + outside of any case ), where they would never be reached. + + Arithmetic overflow is another undefined behavior in C that + can cause various problems. GCC can check for signed overflow, + which performs well (the overhead is in the noise, he said), + but adding warning messages for it does grow the kernel by 6%; + making the overflow abort, instead, only adds 0.1%. Clang can + check for both signed and unsigned overflow; signed overflow + is undefined, while unsigned overflow is defined, but often + unexpected. Marking places where unsigned overflow is expected + is needed; it would be nice to get those annotations put into + the kernel, Cook said. + + Explicit bounds checking is expensive. Doing it for + copy_{to,from}_user() is a less than 1% performance hit, but + adding it to the strcpy() and memcpy() families are around a + 2% hit. Pre-Meltdown that would have been a totally impossible + performance regression for security, he said; post-Meltdown, + since it is less than 5%, maybe there is a chance to add this + checking. + + Better APIs would help as well. He pointed to the evolution of + strcpy() , through str n cpy() and str l cpy() (each with + their own bounds flaws) to str s cpy() , which seems to be "OK + so far". He also mentioned memcpy() again as a poor API with + respect to bounds checking. + + Hardware support for bounds checking is available in the + application data integrity (ADI) feature for SPARC and is + coming for Arm; it may also be available for Intel processors + at some point. These all use a form of "memory tagging", where + allocations get a tag that is stored in the high-order byte of + the address. An offset from the address can be checked by the + hardware to see if it still falls within the allocated region + based on the tag. + + Control-flow integrity (CFI) has become more of an issue + lately because much of what attackers had used in the past has + been marked as "no execute" so they are turning to using + existing code "gadgets" already present in the kernel by + hijacking existing indirect function calls. In C, you can just + call pointers without regard to the type as it just treats + them as an address to jump to. Clang has a CFI-sanitize + feature that enforces the function prototype to restrict the + calls that can be made. It is done at runtime and is not + perfect, in part because there are lots of functions in the + kernel that take one unsigned long parameter and return an + unsigned long. + + Attacks on CFI have both a "forward edge", which is what CFI + sanitize tries to handle, and a "backward edge" that comes + from manipulating the stack values, the return address in + particular. Clang has two methods available to prevent the + stack manipulation. The first is the "safe stack", which puts + various important items (e.g. "safe" variables, register + spills, and the return address) on a separate stack. + Alternatively, the "shadow stack" feature creates a separate + stack just for return addresses. + + One problem with these other stacks is that they are still + writable, so if an attacker can find them in memory, they can + still perform their attacks. Hardware-based protections, like + Intel's Control-Flow Enforcement Technology (CET), + [43]provides a read-only shadow call stack for return + addresses. Another hardware protection is [44]pointer + authentication for Arm, which adds a kind of encrypted tag to + the return address that can be verified before it is used. 
+ + Status and challenges + + Cook then went through the current status of handling these + different problems in the kernel. VLAs are almost completely + gone, he said, just a few remain in the crypto subsystem; he + hopes those VLAs will be gone by 4.20 (or whatever the number + of the next kernel release turns out to be). Once that + happens, he plans to turn on -Wvla for the kernel build so + that none creep back in. + + There has been steady progress made on marking fall-through + cases in switch statements. Only 745 remain to be handled of + the 2311 that existed when this work started; each one + requires scrutiny to determine what the author's intent is. + Auto-initialized local variables can be done using compiler + plugins, but that is "not quite what we want", he said. More + compiler support would be helpful there. For arithmetic + overflow, it would be nice to see GCC get support for the + unsigned case, but memory allocations are now doing explicit + overflow checking at this point. + + Bounds checking has seen some "crying about performance hits", + so we are waiting impatiently for hardware support, he said. + CFI forward-edge protection needs [45]link-time optimization + (LTO) support for Clang in the kernel, but it is currently + working on Android. For backward-edge mitigation, the Clang + shadow call stack is working on Android, but we are + impatiently waiting for hardware support for that too. + + There are a number of challenges in doing security development + for the kernel, Cook said. There are cultural boundaries due + to conservatism within the kernel community; that requires + patiently working and reworking features in order to get them + upstream. There are, of course, technical challenges because + of the complexity of security changes; those kinds of problems + can be solved. There are also resource limitations in terms of + developers, testers, reviewers, and so on. KSPP and the other + kernel security developers are still making that "slow but + steady" progress. + + Cook's [46]slides [PDF] are available for interested readers; + before long, there should be a video available of the talk as + well. + + [I would like to thank LWN's travel sponsor, the Linux + Foundation, for travel assistance to attend the Linux Security + Summit in Vancouver.] + + [47]Comments (70 posted) + + [48]The second half of the 4.19 merge window + + By Jonathan Corbet + + August 26, 2018 By the time Linus Torvalds [49]released + 4.19-rc1 and closed the merge window for this development + cycle, 12,317 non-merge changesets had found their way into + the mainline; about 4,800 of those landed after [50]last + week's summary was written. As tends to be the case late in + the merge window, many of those changes were fixes for the + bigger patches that went in early, but there were also a + number of new features added. Some of the more significant + changes include: + + Core kernel + + The full set of patches adding [51]control-group awareness to + the out-of-memory killer has not been merged due to ongoing + disagreements, but one piece of it has: there is a new + memory.oom.group control knob that will cause all processes + within a control group to be killed in an out-of-memory + situation. + + A new set of protections has been added to prevent an attacker + from fooling a program into writing to an existing file or + FIFO. An open with the O_CREAT flag to a file or FIFO in a + world-writable, sticky directory (e.g. 
/tmp ) will fail if the + owner of the opening process is not the owner of either the + target file or the containing directory. This behavior, + disabled by default, is controlled by the new + protected_regular and protected_fifos sysctl knobs. + + Filesystems and block layer + + The dm-integrity device-mapper target can now use a separate + device for metadata storage. + + EROFS, the "enhanced read-only filesystem", has been added to + the staging tree. It is " a lightweight read-only file system + with modern designs (eg. page-sized blocks, inline + xattrs/data, etc.) for scenarios which need high-performance + read-only requirements, eg. firmwares in mobile phone or + LIVECDs " + + The new "metadata copy-up" feature in overlayfs will avoid + copying a file's contents to the upper layer on a + metadata-only change. See [52]this commit for details. + + Hardware support + + Graphics : Qualcomm Adreno A6xx GPUs. + + Industrial I/O : Spreadtrum SC27xx series PMIC + analog-to-digital converters, Analog Devices AD5758 + digital-to-analog converters, Intersil ISL29501 time-of-flight + sensors, Silicon Labs SI1133 UV index/ambient light sensor + chips, and Bosch Sensortec BME680 sensors. + + Miscellaneous : Generic ADC-based resistive touchscreens, + Generic ASIC devices via the Google [53]Gasket framework , + Analog Devices ADGS1408/ADGS1409 multiplexers, Actions Semi + Owl SoCs DMA controllers, MEN 16Z069 watchdog timers, Rohm + BU21029 touchscreen controllers, Cirrus Logic CS47L35, + CS47L85, CS47L90, and CS47L91 codecs, Cougar 500k gaming + keyboards, Qualcomm GENI-based I2C controllers, Actions + Semiconductor Owl I2C controllers, ChromeOS EC-based USBPD + chargers, and Analog Devices ADP5061 battery chargers. + + USB : Nuvoton NPCM7XX on-chip EHCI USB controllers, Broadcom + Stingray PCIe PHYs, and Renesas R-Car generation 3 PCIe PHYs. + + There is also a new subsystem for the abstraction of GNSS + (global navigation satellite systems — GPS, for example) + receivers in the kernel. To date, such devices have been + handled with an abundance of user-space drivers; the hope is + to bring some order in this area. Support for u-blox and + SiRFstar receivers has been added as well. + + Kernel internal + + The __deprecated marker, used to mark interfaces that should + no longer be used, has been deprecated and removed from the + kernel entirely. [54]Torvalds said : " They are not useful. + They annoy everybody, and nobody ever does anything about + them, because it's always 'somebody elses problem'. And when + people start thinking that warnings are normal, they stop + looking at them, and the real warnings that mean something go + unnoticed. " + + The minimum version of GCC required by the kernel has been + moved up to 4.6. + + There are a couple of significant changes that failed to get + in this time around, including the [55]XArray data structure. + The patches are thought to be ready, but they had the bad luck + to be based on a tree that failed to be merged for other + reasons, so Torvalds [56]didn't even look at them . That, in + turn, blocks another set of patches intended to enable + migration of slab-allocated objects. + + The other big deferral is the [57]new system-call API for + filesystem mounting . Despite ongoing [58]concerns about what + happens when the same low-level device is mounted multiple + times with conflicting options, Al Viro sent [59]a pull + request to send this work upstream. 
The ensuing discussion + made it clear that there is still not a consensus in this + area, though, so it seems that this work has to wait for + another cycle. + + Assuming all goes well, the kernel will stabilize over the + coming weeks and the final 4.19 release will happen in + mid-October. + + [60]Comments (1 posted) + + [61]Measuring (and fixing) I/O-controller throughput loss + + August 29, 2018 + + This article was contributed by Paolo Valente + + Many services, from web hosting and video streaming to cloud + storage, need to move data to and from storage. They also + often require that each per-client I/O flow be guaranteed a + non-zero amount of bandwidth and a bounded latency. An + expensive way to provide these guarantees is to over-provision + storage resources, keeping each resource underutilized, and + thus have plenty of bandwidth available for the few I/O flows + dispatched to each medium. Alternatively one can use an I/O + controller. Linux provides two mechanisms designed to throttle + some I/O streams to allow others to meet their bandwidth and + latency requirements. These mechanisms work, but they come at + a cost: a loss of as much as 80% of total available I/O + bandwidth. I have run some tests to demonstrate this problem; + some upcoming improvements to the [62]bfq I/O scheduler + promise to improve the situation considerably. + + Throttling does guarantee control, even on drives that happen + to be highly utilized but, as will be seen, it has a hard time + actually ensuring that drives are highly utilized. Even with + greedy I/O flows, throttling easily ends up utilizing as + little as 20% of the available speed of a flash-based drive. + Such a speed loss may be particularly problematic with + lower-end storage. On the opposite end, it is also + disappointing with high-end hardware, as the Linux block I/O + stack itself has been [63]redesigned from the ground up to + fully utilize the high speed of modern, fast storage. In + addition, throttling fails to guarantee the expected + bandwidths if I/O contains both reads and writes, or is + sporadic in nature. + + On the bright side, there now seems to be an effective + alternative for controlling I/O: the proportional-share policy + provided by the bfq I/O scheduler. It enables nearly 100% + storage bandwidth utilization, at least with some of the + workloads that are problematic for throttling. An upcoming + version of bfq may be able to achieve this result with almost + all workloads. Finally, bfq guarantees bandwidths with all + workloads. The current limitation of bfq is that its execution + overhead becomes significant at speeds above 400,000 I/O + operations per second on commodity CPUs. + + Using the bfq I/O scheduler, Linux can now guarantee low + latency to lightweight flows containing sporadic, short I/O. + No throughput issues arise, and no configuration is required. + This capability benefits important, time-sensitive tasks, such + as video or audio streaming, as well as executing commands or + starting applications. Although benchmarks are not available + yet, these guarantees might also be provided by the newly + proposed [64]I/O latency controller . It allows administrators + to set target latencies for I/O requests originating from each + group of processes, and favors the groups with the lowest + target latency. + + The testbed + + I ran the tests with an ext4 filesystem mounted on a PLEXTOR + PX-256M5S SSD, which features a peak rate of ~160MB/s with + random I/O, and of ~500MB/s with sequential I/O. 
I used + blk-mq, in Linux 4.18. The system was equipped with a 2.4GHz + Intel Core i7-2760QM CPU and 1.3GHz DDR3 DRAM. In such a + system, a single thread doing synchronous reads reaches a + throughput of 23MB/s. + + For the purposes of these tests, each process is considered to + be in one of two groups, termed "target" and "interferers". A + target is a single-process, I/O-bound group whose I/O is + focused on. In particular, I measure the I/O throughput + enjoyed by this group to get the minimum bandwidth delivered + to the group. An interferer is single-process group whose role + is to generate additional I/O that interferes with the I/O of + the target. The tested workloads contain one target and + multiple interferers. + + The single process in each group either reads or writes, + through asynchronous (buffered) operations, to one file — + different from the file read or written by any other process — + after invalidating the buffer cache for the file. I define a + reader or writer process as either "random" or "sequential", + depending on whether it reads or writes its file at random + positions or sequentially. Finally, an interferer is defined + as being either "active" or "inactive" depending on whether it + performs I/O during the test. When an interferer is mentioned, + it is assumed that the interferer is active. + + Workloads are defined so as to try to cover the combinations + that, I believe, most influence the performance of the storage + device and of the I/O policies. For brevity, in this article I + show results for only two groups of workloads: + + Static sequential : four synchronous sequential readers or + four asynchronous sequential writers, plus five inactive + interferers. + + Static random : four synchronous random readers, all with a + block size equal to 4k, plus five inactive interferers. + + To create each workload, I considered, for each mix of + interferers in the group, two possibilities for the target: it + could be either a random or a sequential synchronous reader. + In [65]a longer version of this article [PDF] , you will also + find results for workloads with varying degrees of I/O + randomness, and for dynamic workloads (containing sporadic I/O + sources). These extra results confirm the losses of throughput + and I/O control for throttling that are shown here. + + I/O policies + + Linux provides two I/O-control mechanisms for guaranteeing (a + minimum) bandwidth, or at least fairness, to long-lived flows: + the throttling and proportional-share I/O policies. With + throttling, one can set a maximum bandwidth limit — "max + limit" for brevity — for the I/O of each group. Max limits can + be used, in an indirect way, to provide the service guarantee + at the focus of this article. For example, to guarantee + minimum bandwidths to I/O flows, a group can be guaranteed a + minimum bandwidth by limiting the maximum bandwidth of all the + other groups. + + Unfortunately, max limits have two drawbacks in terms of + throughput. First, if some groups do not use their allocated + bandwidth, that bandwidth cannot be reclaimed by other active + groups. Second, limits must comply with the worst-case speed + of the device, namely, its random-I/O peak rate. Such limits + will clearly leave a lot of throughput unused with workloads + that otherwise would drive the device to higher throughput + levels. Maximizing throughput is simply not a goal of max + limits. So, for brevity, test results with max limits are not + shown here. 
You can find these results, plus a more detailed + description of the above drawbacks, in the long version of + this article. + + Because of these drawbacks, a new, still experimental, low + limit has been added to the throttling policy. If a group is + assigned a low limit, then the throttling policy automatically + limits the I/O of the other groups in such a way to guarantee + to the group a minimum bandwidth equal to its assigned low + limit. This new throttling mechanism throttles no group as + long as every group is getting at least its assigned minimum + bandwidth. I tested this mechanism, but did not consider the + interesting problem of guaranteeing minimum bandwidths while, + at the same time, enforcing maximum bandwidths. + + The other I/O policy available in Linux, proportional share, + provides weighted fairness. Each group is assigned a weight, + and should receive a portion of the total throughput + proportional to its weight. This scheme guarantees minimum + bandwidths in the same way that low limits do in throttling. + In particular, it guarantees to each group a minimum bandwidth + equal to the ratio between the weight of the group, and the + sum of the weights of all the groups that may be active at the + same time. + + The actual implementation of the proportional-share policy, on + a given drive, depends on what flavor of the block layer is in + use for that drive. If the drive is using the legacy block + interface, the policy is implemented by the cfq I/O scheduler. + Unfortunately, cfq fails to control bandwidths with + flash-based storage, especially on drives featuring command + queueing. This case is not considered in these tests. With + drives using the multiqueue interface, proportional share is + implemented by bfq. This is the combination considered in the + tests. + + To benchmark both throttling (low limits) and proportional + share, I tested, for each workload, the combinations of I/O + policies and I/O schedulers reported in the table below. In + the end, there are three test cases for each workload. In + addition, for some workloads, I considered two versions of bfq + for the proportional-share policy. + + Name + + I/O policy + + Scheduler + + Parameter for target + + Parameter for each of the four active interferers + + Parameter for each of the five inactive interferers + + Sum of parameters + + low-none + + Throttling with low limits + + none + + 10MB/s + + 10MB/s (tot: 40) + + 20MB/s (tot: 100) + + 150MB/s + + prop-bfq + + Proportional share + + bfq + + 300 + + 100 (tot: 400) + + 200 (tot: 1000) + + 1700 + + For low limits, I report results with only none as the I/O + scheduler, because the results are the same with kyber and + mq-deadline. + + The capabilities of the storage medium and of low limits drove + the policy configurations. In particular: + + The configuration of the target and of the active interferers + for low-none is the one for which low-none provides its best + possible minimum-bandwidth guarantee to the target: 10MB/s, + guaranteed if all interferers are readers. Results remain the + same regardless of the values used for target latency and idle + time; I set them to 100µs and 1000µs, respectively, for every + group. + + Low limits for inactive interferers are set to twice the + limits for active interferers, to pose greater difficulties to + the policy. 
+ + I chose weights for prop-bfq so as to guarantee about the same + minimum bandwidth as low-none to the target, in the same + only-reader worst case as for low-none and to preserve, + between the weights of active and inactive interferers, the + same ratio as between the low limits of active and inactive + interferers. + + Full details on configurations can be found in the long + version of this article. + + Each workload was run ten times for each policy, plus ten + times without any I/O control, i.e., with none as I/O + scheduler and no I/O policy in use. For each run, I measured + the I/O throughput of the target (which reveals the bandwidth + provided to the target), the cumulative I/O throughput of the + interferers, and the total I/O throughput. These quantities + fluctuated very little during each run, as well as across + different runs. Thus in the graphs I report only averages over + per-run average throughputs. In particular, for the case of no + I/O control, I report only the total I/O throughput, to give + an idea of the throughput that can be reached without imposing + any control. + + Results + + This plot shows throughput results for the simplest group of + workloads: the static-sequential set. + + With a random reader as the target against sequential readers + as interferers, low-none does guarantee the configured low + limit to the target. Yet it reaches only a low total + throughput. The throughput of the random reader evidently + oscillates around 10MB/s during the test. This implies that it + is at least slightly below 10MB/s for a significant percentage + of the time. But when this happens, the low-limit mechanism + limits the maximum bandwidth of every active group to the low + limit set for the group, i.e., to just 10MB/s. The end result + is a total throughput lower than 10% of the throughput reached + without I/O control. + + That said, the high throughput achieved without I/O control is + obtained by choking the random I/O of the target in favor of + the sequential I/O of the interferers. Thus, it is probably + more interesting to compare low-none throughput with the + throughput reachable while actually guaranteeing 10MB/s to the + target. The target is a single, synchronous, random reader, + which reaches 23MB/s while active. So, to guarantee 10MB/s to + the target, it is enough to serve it for about half of the + time, and the interferers for the other half. Since the device + reaches ~500MB/s with the sequential I/O of the interferers, + the resulting throughput with this service scheme would be + (500+23)/2, or about 260MB/s. low-none thus reaches less than + 20% of the total throughput that could be reached while still + preserving the target bandwidth. + + prop-bfq provides the target with a slightly higher throughput + than low-none. This makes it harder for prop-bfq to reach a + high total throughput, because prop-bfq serves more random I/O + (from the target) than low-none. Nevertheless, prop-bfq gets a + much higher total throughput than low-none. According to the + above estimate, this throughput is about 90% of the maximum + throughput that could be reached, for this workload, without + violating service guarantees. The reason for this good result + is that bfq provides an effective implementation of the + proportional-share service policy. 
At any time, each active + group is granted a fraction of the current total throughput, + and the sum of these fractions is equal to one; so group + bandwidths naturally saturate the available total throughput + at all times. + + Things change with the second workload: a random reader + against sequential writers. Now low-none reaches a much higher + total throughput than prop-bfq. low-none serves much more + sequential (write) I/O than prop-bfq because writes somehow + break the low-limit mechanisms and prevail over the reads of + the target. Conceivably, this happens because writes tend to + both starve reads in the OS (mainly by eating all available + I/O tags) and to cheat on their completion time in the drive. + In contrast, bfq is intentionally configured to privilege + reads, to counter these issues. + + In particular, low-none gets an even higher throughput than no + I/O control at all because it penalizes the random I/O of the + target even more than the no-controller configuration. + + Finally, with the last two workloads, prop-bfq reaches even + higher total throughput than with the first two. It happens + because the target also does sequential I/O, and serving + sequential I/O is much more beneficial for throughput than + serving random I/O. With these two workloads, the total + throughput is, respectively, close to or much higher than that + reached without I/O control. For the last workload, the total + throughput is much higher because, differently from none, bfq + privileges reads over asynchronous writes, and reads yield a + higher throughput than writes. In contrast, low-none still + gets lower or much lower throughput than prop-bfq, because of + the same issues that hinder low-none throughput with the first + two workloads. + + As for bandwidth guarantees, with readers as interferers + (third workload), prop-bfq, as expected, gives the target a + fraction of the total throughput proportional to its weight. + bfq approximates perfect proportional-share bandwidth + distribution among groups doing I/O of the same type (reads or + writes) and with the same locality (sequential or random). + With the last workload, prop-bfq gives much more throughput to + the reader than to all the interferers, because interferers + are asynchronous writers, and bfq privileges reads. + + The second group of workloads (static random), is the one, + among all the workloads considered, for which prop-bfq + performs worst. Results are shown below: + + This chart reports results not only for mainline bfq, but also + for an improved version of bfq which is currently under public + testing. As can be seen, with only random readers, prop-bfq + reaches a much lower total throughput than low-none. This + happens because of the Achilles heel of the bfq I/O scheduler. + If the process in service does synchronous I/O and has a + higher weight than some other process, then, to give strong + bandwidth guarantees to that process, bfq plugs I/O + dispatching every time the process temporarily stops issuing + I/O requests. In this respect, processes actually have + differentiated weights and do synchronous I/O in the workloads + tested. So bfq systematically performs I/O plugging for them. + Unfortunately, this plugging empties the internal queues of + the drive, which kills throughput with random I/O. And the I/O + of all processes in these workloads is also random. + + The situation reverses with a sequential reader as target. 
+ Yet, the most interesting results come from the new version of + bfq, containing small changes to counter exactly the above + weakness. This version recovers most of the throughput loss + with the workload made of only random I/O and more; with the + second workload, where the target is a sequential reader, it + reaches about 3.7 times the total throughput of low-none. + + When the main concern is the latency of flows containing short + I/O, Linux seems now rather high performing, thanks to the bfq + I/O scheduler and the I/O latency controller. But if the + requirement is to provide explicit bandwidth guarantees (or + just fairness) to I/O flows, then one must be ready to give up + much or most of the speed of the storage media. bfq helps with + some workloads, but loses most of the throughput with + workloads consisting of mostly random I/O. Fortunately, there + is apparently hope for much better performance since an + improvement, still under development, seems to enable bfq to + reach a high throughput with all workloads tested so far. + + [ I wish to thank Vivek Goyal for enabling me to make this + article much more fair and sound.] + + [66]Comments (4 posted) + + [67]KDE's onboarding initiative, one year later + + August 24, 2018 + + This article was contributed by Marta Rybczyńska + + [68]Akademy + + In 2017, the KDE community decided on [69]three goals to + concentrate on for the next few years. One of them was + [70]streamlining the onboarding of new contributors (the + others were [71]improving usability and [72]privacy ). During + [73]Akademy , the yearly KDE conference that was held in + Vienna in August, Neofytos Kolokotronis shared the status of + the onboarding goal, the work done during the last year, and + further plans. While it is a complicated process in a project + as big and diverse as KDE, numerous improvements have been + already made. + + Two of the three KDE community goals were proposed by relative + newcomers. Kolokotronis was one of those, having joined the + [74]KDE Promo team not long before proposing the focus on + onboarding. He had previously been involved with [75]Chakra + Linux , a distribution based on KDE software. The fact that + new members of the community proposed strategic goals was also + noted in the [76]Sunday keynote by Claudia Garad . + + Proper onboarding adds excitement to the contribution process + and increases retention, he explained. When we look at [77]the + definition of onboarding , it is a process in which the new + contributors acquire knowledge, skills, and behaviors so that + they can contribute effectively. Kolokotronis proposed to see + it also as socialization: integration into the project's + relationships, culture, structure, and procedures. + + The gains from proper onboarding are many. The project can + grow by attracting new blood with new perspectives and + solutions. The community maintains its health and stays + vibrant. Another important advantage of efficient onboarding + is that replacing current contributors becomes easier when + they change interests, jobs, or leave the project for whatever + reason. Finally, successful onboarding adds new advocates to + the project. + + Achievements so far and future plans + + The team started with ideas for a centralized onboarding + process for the whole of KDE. They found out quickly that this + would not work because KDE is "very decentralized", so it is + hard to provide tools and procedures that are going to work + for the whole project. 
According to Kolokotronis, other + characteristics of KDE that impact onboarding are high + diversity, remote and online teams, and hundreds of + contributors in dozens of projects and teams. In addition, new + contributors already know in which area they want to take part + and they prefer specific information that will be directly + useful for them. + + So the team changed its approach; several changes have since + been proposed and implemented. The [78]Get Involved page, + which is expected to be one of the resources new contributors + read first, has been rewritten. For the [79]Junior Jobs page , + the team is [80] [81]discussing what the generic content for + KDE as a whole should be. The team simplified [82]Phabricator + registration , which resulted in documenting the process + better. Another part of the work includes the [83]KDE Bugzilla + ; it includes, for example initiatives to limit the number of + states of a ticket or remove obsolete products. + + The [84]Plasma Mobile team is heavily involved in the + onboarding goal. The Plasma Mobile developers have simplified + their development environment setup and created an + [85]interactive "Get Involved" page. In addition, the Plasma + team changed the way task descriptions are written; they now + contain more detail, so that it is easier to get involved. The + basic description should be short and clear, and it should + include details of the problem and possible solutions. The + developers try to share the list of skills necessary to + fulfill the tasks and include clear links to the technical + resources needed. + + Kolokotronis and team also identified a new potential source + of contributors for KDE: distributions using KDE. They have + the advantage of already knowing and using the software. The + next idea the team is working on is to make sure that setting + up a development environment is easy. The team plans to work + on this during a dedicated sprint this autumn. + + Searching for new contributors + + Kolokotronis plans to search for new contributors at the + periphery of the project, among the "skilled enthusiasts": + loyal users who actually care about the project. They "can + make wonders", he said. Those individuals may be also less + confident or shy, have troubles making the first step, and + need guidance. The project leaders should take that into + account. + + In addition, newcomers are all different. Kolokotronis + provided a long list of how contributors differ, including + skills and knowledge, motives and interests, and time and + dedication. His advice is to "try to find their superpower", + the skills they have that are missing in the team. Those + "superpowers" can then be used for the benefit of the project. + + If a project does nothing else, he said, it can start with its + documentation. However, this does not only mean code + documentation. Writing down the procedures or information + about the internal work of the project, like who is working on + what, is an important part of a project's documentation and + helps newcomers. There should be also guidelines on how to + start, especially setting up the development environment. + + The first thing the project leaders should do, according to + Kolokotronis, is to spend time on introducing newcomers to the + project. Ideally every new contributor should be assigned + mentors — more experienced members who can help them when + needed. The mentors and project leaders should find tasks that + are interesting for each person. 
Answering an audience + question on suggestions for shy new contributors, he + recommended even more mentoring. It is also very helpful to + make sure that newcomers have enough to read, but "avoid + RTFM", he highlighted. It is also easy for a new contributor + "to fly away", he said. The solution is to keep requesting + things and be proactive. + + What the project can do? + + Kolokotronis suggested a number of actions for a project when + it wants to improve its onboarding. The first step is + preparation: the project leaders should know the team's and + the project's needs. Long-term planning is important, too. It + is not enough to wait for contributors to come — the project + should be proactive, which means reaching out to candidates, + suggesting appropriate tasks and, finally, making people + available for the newcomers if they need help. + + This leads to next step: to be a mentor. Kolokotronis suggests + being a "great host", but also trying to phase out the + dependency on the mentor rapidly. "We have been all + newcomers", he said. It can be intimidating to join an + existing group. Onboarding creates a sense of belonging which, + in turn, increases retention. + + The last step proposed was to be strategic. This includes + thinking about the emotions you want newcomers to feel. + Kolokotronis explained the strategic part with an example. The + overall goal is (surprise!) improve onboarding of new + contributors. An intermediate objective might be to keep the + newcomers after they have made their first commit. If your + strategy is to keep them confident and proud, you can use + different tactics like praise and acknowledgment of the work + in public. Another useful tactic may be assigning simple + tasks, according to the skill of the contributor. + + To summarize, the most important thing, according to + Kolokotronis, is to respond quickly and spend time with new + contributors. This time should be used to explain procedures, + and to introduce the people and culture. It is also essential + to guide first contributions and praise contributor's skill + and effort. Increase the difficulty of tasks over time to keep + contributors motivated and challenged. And finally, he said, + "turn them into mentors". + + Kolokotronis acknowledges that onboarding "takes time" and + "everyone complains" about it. However, he is convinced that + it is beneficial in the long term and that it decreases + developer turnover. + + Advice to newcomers + + Kolokotronis concluded with some suggestions for newcomers to + a project. They should try to be persistent and to not get + discouraged when something goes wrong. Building connections + from the very beginning is helpful. He suggests asking + questions as if you were already a member "and things will be + fine". However, accept criticism if it happens. + + One of the next actions of the onboarding team will be to + collect feedback from newcomers and experienced contributors + to see if they agree on the ideas and processes introduced so + far. + + [86]Comments (none posted) + + [87]Sharing and archiving data sets with Dat + + August 27, 2018 + + This article was contributed by Antoine Beaupré + + [88]Dat is a new peer-to-peer protocol that uses some of the + concepts of [89]BitTorrent and Git. Dat primarily targets + researchers and open-data activists as it is a great tool for + sharing, archiving, and cataloging large data sets. But it can + also be used to implement decentralized web applications in a + novel way. 
+ + Dat quick primer + + Dat is written in JavaScript, so it can be installed with npm + , but there are [90]standalone binary builds and a [91]desktop + application (as an AppImage). An [92]online viewer can be used + to inspect data for those who do not want to install arbitrary + binaries on their computers. + + The command-line application allows basic operations like + downloading existing data sets and sharing your own. Dat uses + a 32-byte hex string that is an [93]ed25519 public key , which + is is used to discover and find content on the net. For + example, this will download some sample data: $ dat clone \ + + dat://778f8d955175c92e4ced5e4f5563f69bfec0c86cc6f670352c457943- + 666fe639 \ + + ~/Downloads/dat-demo + + Similarly, the share command is used to share content. It + indexes the files in a given directory and creates a new + unique address like the one above. The share command starts a + server that uses multiple discovery mechanisms (currently, the + [94]Mainline Distributed Hash Table (DHT), a [95]custom DNS + server , and multicast DNS) to announce the content to its + peers. This is how another user, armed with that public key, + can download that content with dat clone or mirror the files + continuously with dat sync . + + So far, this looks a lot like BitTorrent [96]magnet links + updated with 21st century cryptography. But Dat adds revisions + on top of that, so modifications are automatically shared + through the swarm. That is important for public data sets as + those are often dynamic in nature. Revisions also make it + possible to use [97]Dat as a backup system by saving the data + incrementally using an [98]archiver . + + While Dat is designed to work on larger data sets, processing + them for sharing may take a while. For example, sharing the + Linux kernel source code required about five minutes as Dat + worked on indexing all of the files. This is comparable to the + performance offered by [99]IPFS and BitTorrent. Data sets with + more or larger files may take quite a bit more time. + + One advantage that Dat has over IPFS is that it doesn't + duplicate the data. When IPFS imports new data, it duplicates + the files into ~/.ipfs . For collections of small files like + the kernel, this is not a huge problem, but for larger files + like videos or music, it's a significant limitation. IPFS + eventually implemented a solution to this [100]problem in the + form of the experimental [101]filestore feature , but it's not + enabled by default. Even with that feature enabled, though, + changes to data sets are not automatically tracked. In + comparison, Dat operation on dynamic data feels much lighter. + The downside is that each set needs its own dat share process. + + Like any peer-to-peer system, Dat needs at least one peer to + stay online to offer the content, which is impractical for + mobile devices. Hosting providers like [102]Hashbase (which is + a [103]pinning service in Dat jargon) can help users keep + content online without running their own [104]server . The + closest parallel in the traditional web ecosystem would + probably be content distribution networks (CDN) although + pinning services are not necessarily geographically + distributed and a CDN does not necessarily retain a complete + copy of a website. [105] + + A web browser called [106]Beaker , based on the [107]Electron + framework, can access Dat content natively without going + through a pinning service. 
Furthermore, Beaker is essential to + get any of the [108]Dat applications working, as they + fundamentally rely on dat:// URLs to do their magic. This + means that Dat applications won't work for most users unless + they install that special web browser. There is a [109]Firefox + extension called " [110]dat-fox " for people who don't want to + install yet another browser, but it requires installing a + [111]helper program . The extension will be able to load + dat:// URLs but many applications will still not work. For + example, the [112]photo gallery application completely fails + with dat-fox. + + Dat-based applications look promising from a privacy point of + view. Because of its peer-to-peer nature, users regain control + over where their data is stored: either on their own computer, + an online server, or by a trusted third party. But considering + the protocol is not well established in current web browsers, + I foresee difficulties in adoption of that aspect of the Dat + ecosystem. Beyond that, it is rather disappointing that Dat + applications cannot run natively in a web browser given that + JavaScript is designed exactly for that. + + Dat privacy + + An advantage Dat has over other peer-to-peer protocols like + BitTorrent is end-to-end encryption. I was originally + concerned by the encryption design when reading the + [113]academic paper [PDF] : + + It is up to client programs to make design decisions around + which discovery networks they trust. For example if a Dat + client decides to use the BitTorrent DHT to discover peers, + and they are searching for a publicly shared Dat key (e.g. a + key cited publicly in a published scientific paper) with known + contents, then because of the privacy design of the BitTorrent + DHT it becomes public knowledge what key that client is + searching for. + + So in other words, to share a secret file with another user, + the public key is transmitted over a secure side-channel, only + to then leak during the discovery process. Fortunately, the + public Dat key is not directly used during discovery as it is + [114]hashed with BLAKE2B . Still, the security model of Dat + assumes the public key is private, which is a rather + counterintuitive concept that might upset cryptographers and + confuse users who are frequently encouraged to type such + strings in address bars and search engines as part of the Dat + experience. There is a [115]security & privacy FAQ in the Dat + documentation warning about this problem: + + One of the key elements of Dat privacy is that the public key + is never used in any discovery network. The public key is + hashed, creating the discovery key. Whenever peers attempt to + connect to each other, they use the discovery key. + + Data is encrypted using the public key, so it is important + that this key stays secure. + + There are other privacy issues outlined in the document; it + states that " Dat faces similar privacy risks as BitTorrent ": + + When you download a dataset, your IP address is exposed to the + users sharing that dataset. This may lead to honeypot servers + collecting IP addresses, as we've seen in Bittorrent. However, + with dataset sharing we can create a web of trust model where + specific institutions are trusted as primary sources for + datasets, diminishing the sharing of IP addresses. + + A Dat blog post refers to this issue as [116]reader privacy + and it is, indeed, a sensitive issue in peer-to-peer networks. 
+ It is how BitTorrent users are discovered and served scary + verbiage from lawyers, after all. But Dat makes this a little + better because, to join a swarm, you must know what you are + looking for already, which means peers who can look at swarm + activity only include users who know the secret public key. + This works well for secret content, but for larger, public + data sets, it is a real problem; it is why the Dat project has + [117]avoided creating a Wikipedia mirror so far. + + I found another privacy issue that is not documented in the + security FAQ during my review of the protocol. As mentioned + earlier, the [118]Dat discovery protocol routinely phones home + to DNS servers operated by the Dat project. This implies that + the default discovery servers (and an attacker watching over + their traffic) know who is publishing or seeking content, in + essence discovering the "social network" behind Dat. This + discovery mechanism can be disabled in clients, but a similar + privacy issue applies to the DHT as well, although that is + distributed so it doesn't require trust of the Dat project + itself. + + Considering those aspects of the protocol, privacy-conscious + users will probably want to use Tor or other anonymization + techniques to work around those concerns. + + The future of Dat + + [119]Dat 2.0 was released in June 2017 with performance + improvements and protocol changes. [120]Dat Enhancement + Proposals (DEPs) guide the project's future development; most + work is currently geared toward implementing the draft " + [121]multi-writer proposal " in [122]HyperDB . Without + multi-writer support, only the original publisher of a Dat can + modify it. According to Joe Hand, co-executive-director of + [123]Code for Science & Society (CSS) and Dat core developer, + in an IRC chat, "supporting multiwriter is a big requirement + for lots of folks". For example, while Dat might allow Alice + to share her research results with Bob, he cannot modify or + contribute back to those results. The multi-writer extension + allows for Alice to assign trust to Bob so he can have write + access to the data. + + Unfortunately, the current proposal doesn't solve the " hard + problems " of " conflict merges and secure key distribution ". + The former will be worked out through user interface tweaks, + but the latter is a classic problem that security projects + have typically trouble finding solutions for—Dat is no + exception. How will Alice securely trust Bob? The OpenPGP web + of trust? Hexadecimal fingerprints read over the phone? Dat + doesn't provide a magic solution to this problem. + + Another thing limiting adoption is that Dat is not packaged in + any distribution that I could find (although I [124]requested + it in Debian ) and, considering the speed of change of the + JavaScript ecosystem, this is unlikely to change any time + soon. A [125]Rust implementation of the Dat protocol has + started, however, which might be easier to package than the + multitude of [126]Node.js modules. In terms of mobile device + support, there is an experimental Android web browser with Dat + support called [127]Bunsen , which somehow doesn't run on my + phone. Some adventurous users have successfully run Dat in + [128]Termux . I haven't found an app running on iOS at this + point. + + Even beyond platform support, distributed protocols like Dat + have a tough slope to climb against the virtual monopoly of + more centralized protocols, so it remains to be seen how + popular those tools will be. 
Hand says Dat is supported by + multiple non-profit organizations. Beyond CSS, [129]Blue Link + Labs is working on the Beaker Browser as a self-funded startup + and a grass-roots organization, [130]Digital Democracy , has + contributed to the project. The [131]Internet Archive has + [132]announced a collaboration between itself, CSS, and the + California Digital Library to launch a pilot project to see " + how members of a cooperative, decentralized network can + leverage shared services to ensure data preservation while + reducing storage costs and increasing replication counts ". + + Hand said adoption in academia has been "slow but steady" and + that the [133]Dat in the Lab project has helped identify areas + that could help researchers adopt the project. Unfortunately, + as is the case with many free-software projects, he said that + "our team is definitely a bit limited on bandwidth to push for + bigger adoption". Hand said that the project received a grant + from [134]Mozilla Open Source Support to improve its + documentation, which will be a big help. + + Ultimately, Dat suffers from a problem common to all + peer-to-peer applications, which is naming. Dat addresses are + not exactly intuitive: humans do not remember strings of 64 + hexadecimal characters well. For this, Dat took a [135]similar + approach to IPFS by using DNS TXT records and /.well-known URL + paths to bridge existing, human-readable names with Dat + hashes. So this sacrifices a part of the decentralized nature + of the project in favor of usability. + + I have tested a lot of distributed protocols like Dat in the + past and I am not sure Dat is a clear winner. It certainly has + advantages over IPFS in terms of usability and resource usage, + but the lack of packages on most platforms is a big limit to + adoption for most people. This means it will be difficult to + share content with my friends and family with Dat anytime + soon, which would probably be my primary use case for the + project. Until the protocol reaches the wider adoption that + BitTorrent has seen in terms of platform support, I will + probably wait before switching everything over to this + promising project. + + [136]Comments (11 posted) + + Page editor : Jonathan Corbet + + Inside this week's LWN.net Weekly Edition + + [137]Briefs : OpenSSH 7.8; 4.19-rc1; Which stable?; Netdev + 0x12; Bison 3.1; Quotes; ... + + [138]Announcements : Newsletters; events; security updates; + kernel patches; ... 
Next page : [139]Brief items>> + + + + [1] https://lwn.net/Articles/763743/ + + [2] https://lwn.net/Articles/763626/ + + [3] https://lwn.net/Articles/763641/ + + [4] https://lwn.net/Articles/763106/ + + [5] https://lwn.net/Articles/763603/ + + [6] https://lwn.net/Articles/763175/ + + [7] https://lwn.net/Articles/763492/ + + [8] https://lwn.net/Articles/763254/ + + [9] https://lwn.net/Articles/763255/ + + [10] https://lwn.net/Articles/763743/#Comments + + [11] https://lwn.net/Articles/763626/ + + [12] http://julialang.org/ + + [13] https://julialang.org/blog/2018/08/one-point-zero + + [14] https://julialang.org/benchmarks/ + + [15] https://juliacomputing.com/ + + [16] https://en.wikipedia.org/wiki/Read%E2%80%93eval%E2%80%93p- + rint_loop + + [17] http://llvm.org/ + + [18] http://www.3blue1brown.com/essence-of-linear-algebra-page/ + + [19] http://www.netlib.org/lapack/ + + [20] https://lwn.net/Articles/657157/ + + [21] https://julialang.org/publications/julia-fresh-approach-B- + EKS.pdf + + [22] https://lwn.net/Articles/738915/ + + [23] https://pypy.org/ + + [24] https://github.com/JuliaPy/PyCall.jl + + [25] https://github.com/JuliaInterop/RCall.jl + + [26] https://docs.julialang.org/en/stable/ + + [27] https://julialang.org/learning/ + + [28] http://bogumilkaminski.pl/files/julia_express.pdf + + [29] https://docs.julialang.org/en/stable/manual/noteworthy-di- + fferences/#Noteworthy-differences-from-Python-1 + + [30] https://lwn.net/Articles/746386/ + + [31] https://github.com/JuliaLang/IJulia.jl + + [32] https://lwn.net/Articles/764001/ + + [33] https://lwn.net/Articles/763626/#Comments + + [34] https://lwn.net/Articles/763641/ + + [35] https://lwn.net/Archives/ConferenceByYear/#2018-Linux_Sec- + urity_Summit_NA + + [36] https://events.linuxfoundation.org/events/linux-security- + summit-north-america-2018/ + + [37] https://kernsec.org/wiki/index.php/Kernel_Self_Protection- + _Project + + [38] https://lwn.net/Articles/763644/ + + [39] https://raphlinus.github.io/programming/rust/2018/08/17/u- + ndefined-behavior.html + + [40] https://lwn.net/Articles/749064/ + + [41] https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/- + linux.git/commit/?id=02361bc77888 + + [42] https://lore.kernel.org/lkml/CA+55aFzCG-zNmZwX4A2FQpadafL- + fEzK6CC=qPXydAacU1RqZWA@mail.gmail.com/T/#u + + [43] https://lwn.net/Articles/758245/ + + [44] https://lwn.net/Articles/718888/ + + [45] https://lwn.net/Articles/744507/ + + [46] https://outflux.net/slides/2018/lss/danger.pdf + + [47] https://lwn.net/Articles/763641/#Comments + + [48] https://lwn.net/Articles/763106/ + + [49] https://lwn.net/Articles/763497/ + + [50] https://lwn.net/Articles/762566/ + + [51] https://lwn.net/Articles/761118/ + + [52] https://git.kernel.org/linus/d5791044d2e5749ef4de84161cec- + 5532e2111540 + + [53] https://lwn.net/ml/linux-kernel/20180630000253.70103-1-sq- + ue@chromium.org/ + + [54] https://git.kernel.org/linus/771c035372a036f83353eef46dbb- + 829780330234 + + [55] https://lwn.net/Articles/745073/ + + [56] https://lwn.net/ml/linux-kernel/CA+55aFxFjAmrFpwQmEHCthHO- + zgidCKnod+cNDEE+3Spu9o1s3w@mail.gmail.com/ + + [57] https://lwn.net/Articles/759499/ + + [58] https://lwn.net/Articles/762355/ + + [59] https://lwn.net/ml/linux-fsdevel/20180823223145.GK6515@Ze- + nIV.linux.org.uk/ + + [60] https://lwn.net/Articles/763106/#Comments + + [61] https://lwn.net/Articles/763603/ + + [62] https://lwn.net/Articles/601799/ + + [63] https://lwn.net/Articles/552904 + + [64] https://lwn.net/Articles/758963/ + + [65] 
http://algogroup.unimore.it/people/paolo/pub-docs/extende- + d-lat-bw-throughput.pdf + + [66] https://lwn.net/Articles/763603/#Comments + + [67] https://lwn.net/Articles/763175/ + + [68] https://lwn.net/Archives/ConferenceByYear/#2018-Akademy + + [69] https://dot.kde.org/2017/11/30/kdes-goals-2018-and-beyond + + [70] https://phabricator.kde.org/T7116 + + [71] https://phabricator.kde.org/T6831 + + [72] https://phabricator.kde.org/T7050 + + [73] https://akademy.kde.org/ + + [74] https://community.kde.org/Promo + + [75] https://www.chakralinux.org/ + + [76] https://conf.kde.org/en/Akademy2018/public/events/79 + + [77] https://en.wikipedia.org/wiki/Onboarding + + [78] https://community.kde.org/Get_Involved + + [79] https://community.kde.org/KDE/Junior_Jobs + + [80] https://lwn.net/Articles/763189/ + + [81] https://phabricator.kde.org/T8686 + + [82] https://phabricator.kde.org/T7646 + + [83] https://bugs.kde.org/ + + [84] https://www.plasma-mobile.org/index.html + + [85] https://www.plasma-mobile.org/findyourway + + [86] https://lwn.net/Articles/763175/#Comments + + [87] https://lwn.net/Articles/763492/ + + [88] https://datproject.org + + [89] https://www.bittorrent.com/ + + [90] https://github.com/datproject/dat/releases + + [91] https://docs.datproject.org/install + + [92] https://datbase.org/ + + [93] https://ed25519.cr.yp.to/ + + [94] https://en.wikipedia.org/wiki/Mainline_DHT + + [95] https://github.com/mafintosh/dns-discovery + + [96] https://en.wikipedia.org/wiki/Magnet_URI_scheme + + [97] https://blog.datproject.org/2017/10/13/using-dat-for-auto- + matic-file-backups/ + + [98] https://github.com/mafintosh/hypercore-archiver + + [99] https://ipfs.io/ + + [100] https://github.com/ipfs/go-ipfs/issues/875 + + [101] https://github.com/ipfs/go-ipfs/blob/master/docs/experim- + ental-features.md#ipfs-filestore + + [102] https://hashbase.io/ + + [103] https://github.com/datprotocol/DEPs/blob/master/proposal- + s/0003-http-pinning-service-api.md + + [104] https://docs.datproject.org/server + + [105] https://lwn.net/Articles/763544/ + + [106] https://beakerbrowser.com/ + + [107] https://electronjs.org/ + + [108] https://github.com/beakerbrowser/explore + + [109] https://addons.mozilla.org/en-US/firefox/addon/dat-p2p-p- + rotocol/ + + [110] https://github.com/sammacbeth/dat-fox + + [111] https://github.com/sammacbeth/dat-fox-helper + + [112] https://github.com/beakerbrowser/dat-photos-app + + [113] https://github.com/datproject/docs/raw/master/papers/dat- + paper.pdf + + [114] https://github.com/datprotocol/DEPs/blob/653e0cf40233b5d- + 474cddc04235577d9d55b2934/proposals/0000-peer-discovery.md#dis- + covery-keys + + [115] https://docs.datproject.org/security + + [116] https://blog.datproject.org/2016/12/12/reader-privacy-on- + the-p2p-web/ + + [117] https://blog.datproject.org/2017/12/10/dont-ship/ + + [118] https://github.com/datprotocol/DEPs/pull/7 + + [119] https://blog.datproject.org/2017/06/01/dat-sleep-release/ + + [120] https://github.com/datprotocol/DEPs + + [121] https://github.com/datprotocol/DEPs/blob/master/proposal- + s/0008-multiwriter.md + + [122] https://github.com/mafintosh/hyperdb + + [123] https://codeforscience.org/ + + [124] https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=890565 + + [125] https://github.com/datrs + + [126] https://nodejs.org/en/ + + [127] https://bunsenbrowser.github.io/#!index.md + + [128] https://termux.com/ + + [129] https://bluelinklabs.com/ + + [130] https://www.digital-democracy.org/ + + [131] https://archive.org + + [132] 
https://blog.archive.org/2018/06/05/internet-archive-cod- + e-for-science-and-society-and-california-digital-library-to-pa- + rtner-on-a-data-sharing-and-preservation-pilot-project/ + + [133] https://github.com/codeforscience/Dat-in-the-Lab + + [134] https://www.mozilla.org/en-US/moss/ + + [135] https://github.com/datprotocol/DEPs/blob/master/proposal- + s/0005-dns.md + + [136] https://lwn.net/Articles/763492/#Comments + + [137] https://lwn.net/Articles/763254/ + + [138] https://lwn.net/Articles/763255/ + + [139] https://lwn.net/Articles/763254/ + + + diff --git a/test/expected/LWN/0000763252.header b/test/expected/LWN/0000763252.header new file mode 100644 index 0000000..8675073 --- /dev/null +++ b/test/expected/LWN/0000763252.header @@ -0,0 +1,3 @@ +0LWN.net Weekly Edition for August 30, 2018 null/LWN/0000763252 70 +i +i diff --git a/test/expected/LWN/0000763252.header.html b/test/expected/LWN/0000763252.header.html new file mode 100644 index 0000000..ddbdc83 --- /dev/null +++ b/test/expected/LWN/0000763252.header.html @@ -0,0 +1,20 @@ + + + + + + + + +
+

LWN.net Weekly Edition for August 30, 2018

+
+
+
+ +
+
+
+ diff --git a/test/expected/LWN/0000763252.html b/test/expected/LWN/0000763252.html new file mode 100644 index 0000000..183fc89 --- /dev/null +++ b/test/expected/LWN/0000763252.html @@ -0,0 +1,25 @@ + + + + + + + + +
+

LWN.net Weekly Edition for August 30, 2018

+
+
+ +
+
+ [1]Welcome to the LWN.net Weekly Edition for August 30, 2018 This edition contains the following feature content:

[2]An introduction to the Julia language, part 1 : Julia is a language designed for intensive numerical calculations; this article gives an overview of its core features.

[3]C considered dangerous : a Linux Security Summit talk on what is being done to make the use of C in the kernel safer.

[4]The second half of the 4.19 merge window : the final features merged (or not merged) before the merge window closed for this cycle.

[5]Measuring (and fixing) I/O-controller throughput loss : the kernel's I/O controllers can provide useful bandwidth guarantees, but at a significant cost in throughput.

[6]KDE's onboarding initiative, one year later : what has gone right in KDE's effort to make it easier for contributors to join the project, and what remains to be done.

[7]Sharing and archiving data sets with Dat : an innovative approach to addressing and sharing data on the net.

This week's edition also includes these inner pages:

[8]Brief items : Brief news items from throughout the community.

[9]Announcements : Newsletters, conferences, security updates, patches, and more.

Please enjoy this week's edition, and, as always, thank you for supporting LWN.net.

[10]Comments (none posted)

[11]An introduction to the Julia language, part 1

August 28, 2018

This article was contributed by Lee Phillips

[12]Julia is a young computer language aimed at serving the needs of scientists, engineers, and other practitioners of numerically intensive programming. It was first publicly released in 2012. After an intense period of language development, version 1.0 was [13]released on August 8. The 1.0 release promises years of language stability; users can be confident that developments in the 1.x series will not break their code. This is the first part of a two-part article introducing the world of Julia. This part will introduce enough of the language syntax and constructs to allow you to begin to write simple programs. The following installment will acquaint you with the additional pieces needed to create real projects, and to make use of Julia's ecosystem.

Goals and history

The Julia project has ambitious goals. It wants the language to perform about as well as Fortran or C when running numerical algorithms, while remaining as pleasant to program in as Python. I believe the project has met these goals and is poised to see increasing adoption by numerical researchers, especially now that an official, stable release is available.

The Julia project maintains a [14]micro-benchmark page that compares its numerical performance against both statically compiled languages (C, Fortran) and dynamically typed languages (R, Python). While it's certainly possible to argue about the relevance and fairness of particular benchmarks, the data overall supports the Julia team's contention that Julia has generally achieved parity with Fortran and C; the benchmark source code is available.

Julia began as research in computer science at MIT; its creators are Alan Edelman, Stefan Karpinski, Jeff Bezanson, and Viral Shah. These four remain active developers of the language. They, along with Keno Fischer, co-founder and CTO of [15]Julia Computing , were kind enough to share their thoughts with us about the language. I'll be drawing on their comments later on; for now, let's get a taste of what Julia code looks like.

Getting started

To explore Julia initially, start up its standard [16]read-eval-print loop (REPL) by typing julia at the terminal, assuming that you have installed it. You will then be able to interact with what will seem to be an interpreted language — but, behind the scenes, those commands are being compiled by a just-in-time (JIT) compiler that uses the [17]LLVM compiler framework . This allows Julia to be interactive, while turning the code into fast, native machine instructions. However, the JIT compiler passes sometimes introduce noticeable delays at the REPL, especially when using a function for the first time.

To run a Julia program non-interactively, execute a command like: $ julia script.jl

Julia has all the usual data structures: numbers of various types (including complex and rational numbers), multidimensional arrays, dictionaries, strings, and characters. Functions are first-class: they can be passed as arguments to other functions, can be members of arrays, and so on.
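
To make this concrete, here is a minimal REPL sketch (the name square is invented purely for illustration): a one-line function is defined, passed to map , and stored in an array alongside built-in functions: julia square(x) = x^2 # an arbitrary example function

square (generic function with 1 method)

julia map(square, [1, 2, 3])

3-element Array{Int64,1}:

1

4

9

julia ops = [sin, cos, square]; ops[3](5)

25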

Julia embraces Unicode. Strings, which are enclosed in double quotes, are arrays of Unicode characters, which are enclosed in single quotes. The " * " operator is used for string and character concatenation. Thus 'a' and 'β' are characters, and 'aβ' is a syntax error. "a" and "β" are strings, as are "aβ", 'a' * 'β', and "a" * "β" — all evaluate to the same string.
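
A quick REPL sketch illustrating the difference between characters and strings: julia 'a' * 'β'

"aβ"

julia "a" * "β"

"aβ"

julia length("aβ")

2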

Variable and function names can contain non-ASCII characters. This, along with Julia's clever syntax that understands numbers prepended to variables to mean multiplication, goes a long way to allowing the numerical scientist to write code that more closely resembles the compact mathematical notation of the equations that usually lie behind it. julia ε₁ = 0.01

0.01

julia ε₂ = 0.02

0.02

julia 2ε₁ + 3ε₂

0.08

And where does Julia come down on the age-old debate of what to do about 1/2 ? In Fortran and Python 2, this will get you 0, since 1 and 2 are integers, and the result is rounded down to the integer 0. This was deemed inconsistent, and confusing to some, so it was changed in Python 3 to return 0.5 — which is what you get in Julia, too.
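
This is easy to check at the REPL; Julia also keeps an explicit integer-division operator, ÷ (equivalently, the div function), for when truncating division is what you want (a small sketch): julia 1/2

0.5

julia 1 ÷ 2

0

julia div(7, 2)

3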

While we're on the subject of fractions, Julia can handle rational numbers, with a special syntax: 3//5 + 2//3 returns 19//15 , while 3/5 + 2/3 gets you the floating-point answer 1.2666666666666666. Internally, Julia thinks of a rational number in its reduced form, so the expression 6//8 == 3//4 returns true , and numerator(6//8) returns 3 .

Arrays

Arrays are enclosed in square brackets and indexed with an iterator that can contain a step value: julia a = [1, 2, 3, 4, 5, 6]

6-element Array{Int64,1}:

1

2

3

4

5

6

julia a[1:2:end]

3-element Array{Int64,1}:

1

3

5

As you can see, indexing starts at one, and the useful end index means the obvious thing. When you define a variable in the REPL, Julia replies with the type and value of the assigned data; you can suppress this output by ending your input line with a semicolon.

Since arrays are such a vital part of numerical computation, and Julia makes them easy to work with, we'll spend a bit more time with them than the other data structures.

To illustrate the syntax, we can start with a couple of 2D arrays, defined at the REPL: julia a = [1 2 3; 4 5 6]

2×3 Array{Int64,2}:

1 2 3

4 5 6

julia z = [-1 -2 -3; -4 -5 -6];

Indexing is as expected: julia a[1, 2]

2

You can glue arrays together horizontally: julia [a z]

2×6 Array{Int64,2}:

1 2 3 -1 -2 -3

4 5 6 -4 -5 -6

And vertically: julia [a; z]

4×3 Array{Int64,2}:

1 2 3

4 5 6

-1 -2 -3

-4 -5 -6

Julia has all the usual operators for handling arrays, and [18]linear algebra functions that work with matrices (2D arrays). The linear algebra functions are part of Julia's standard library, but need to be imported with a command like " using LinearAlgebra ", which is a detail omitted from the current documentation. The functions include such things as determinants, matrix inverses, eigenvalues and eigenvectors, many kinds of matrix factorizations, etc. Julia has not reinvented the wheel here, but wisely uses the [19]LAPACK Fortran library of battle-tested linear algebra routines.
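
As a brief sketch of what using that module looks like (the matrix m here is an arbitrary example): julia using LinearAlgebra

julia m = [2 1; 1 3];

julia det(m)

5.0

julia inv(m)

2×2 Array{Float64,2}:

0.6 -0.2

-0.2 0.4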

The extension of arithmetic operators to arrays is usually intuitive: julia a + z

2×3 Array{Int64,2}:

0 0 0

0 0 0

And the numerical prepending syntax works with arrays, too: julia 3a + 4z

2×3 Array{Int64,2}:

-1 -2 -3

-4 -5 -6

Putting a multiplication operator between two matrices gets you matrix multiplication: julia a * transpose(a)

2×2 Array{Int64,2}:

14 32

32 77

You can "broadcast" numbers to cover all the elements in an array by prepending the usual arithmetic operators with a dot: julia 1 .+ a

2×3 Array{Int64,2}:

2 3 4

5 6 7

Note that the language only actually requires the dot for some operators, but not for others, such as "*" and "/". The reasons for this are arcane, and it probably makes sense to be consistent and use the dot whenever you intend broadcasting. Note also that the current version of the official documentation is incorrect in claiming that you may omit the dot from "+" and "-"; in fact, this now gives an error.

You can use the dot notation to turn any function into one that operates on each element of an array: julia round.(sin.([0, π/2, π, 3π/2, 2π]))

5-element Array{Float64,1}:

0.0

1.0

0.0

-1.0

-0.0

The example above illustrates chaining two dotted functions together. The Julia compiler turns expressions like this into "fused" operations: instead of applying each function in turn to create a new array that is passed to the next function, the compiler combines the functions into a single compound function that is applied once over the array, creating a significant optimization.

You can use this dot notation with any function, including your own, to turn it into a version that operates element-wise over arrays.
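
For example (a small sketch, with an invented function name): julia cube(x) = x^3 # any scalar function will do

cube (generic function with 1 method)

julia cube.([1, 2, 3])

3-element Array{Int64,1}:

1

8

27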

Dictionaries (associative arrays) can be defined with several syntaxes. Here's one: julia d1 = Dict("A"=1, "B"=2)

Dict{String,Int64} with 2 entries:

"B" = 2

"A" = 1

You may have noticed that the code snippets so far have not included any type declarations. Every value in Julia has a type, but the compiler will infer types if they are not specified. It is generally not necessary to declare types for performance, but type declarations sometimes serve other purposes that we'll return to later. Julia has a deep and sophisticated type system, including user-defined types and C-like structs. Types can have behaviors associated with them, and can inherit behaviors from other types. The best thing about Julia's type system is that you can ignore it entirely, use just a few pieces of it, or spend weeks studying its design.
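
As a small taste of that system, here is a sketch (the names Point and norm2 are invented for illustration) defining a C-like struct and a function that accepts only that type: julia struct Point

x::Float64

y::Float64

end

julia norm2(p::Point) = p.x^2 + p.y^2

norm2 (generic function with 1 method)

julia norm2(Point(3.0, 4.0))

25.0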

Control flow

Julia code is organized in blocks, which can indicate control flow, function definitions, and other code units. Blocks are terminated with the end keyword, and indentation is not significant. Statements are separated either with newlines or semicolons.

Julia has the typical control flow constructs; here is a while block: julia i = 1;

julia while i 5

print(i)

global i = i + 1

end

1234

Notice the global keyword. Most blocks in Julia introduce a local scope for variables; without this keyword here, we would get an error about an undefined variable.

Julia has the usual if statements and for loops that use the same iterators that we introduced above for array indexing. We can also iterate over collections: julia for i ∈ ['a', 'b', 'c']

println(i)

end

a

b

c

In place of the fancy math symbol in this for loop, we can use " = " or " in ". If you want to use the math symbol but have no convenient way to type it, the REPL will help you: type " \in " and the TAB key, and the symbol appears; you can type many [20]LaTeX expressions into the REPL in this way.

Development of Julia

The language is developed on GitHub, with over 700 contributors. The Julia team mentioned in their email to us that the decision to use GitHub has been particularly good for Julia, as it streamlined the process for many of their contributors, who are scientists or domain experts in various fields, rather than professional software developers.

The creators of Julia have [21]published [PDF] a detailed “mission statement” for the language, describing their aims and motivations. A key issue that they wanted their language to solve is what they called the "two-language problem." This situation is familiar to anyone who has used Python or another dynamic language on a demanding numerical problem. To get good performance, you will wind up rewriting the numerically intensive parts of the program in C or Fortran, dealing with the interface between the two languages, and may still be disappointed in the overhead presented by calling the foreign routines from your original code.

For Python, [22]NumPy and SciPy wrap many numerical routines, written in Fortran or C, for efficient use from that language, but you can only take advantage of this if your calculation fits the pattern of an available routine; in more general cases, where you will have to write a loop over your data, you are stuck with Python's native performance, which is orders of magnitude slower. If you switch to an alternative, faster implementation of Python, such as [23]PyPy , the numerical libraries may not be compatible; NumPy became available for PyPy only within about the past year.

Julia solves the two-language problem by being as expressive and simple to program in as a dynamic scripting language, while having the native performance of a static, compiled language. There is no need to write numerical libraries in a second language, but C or Fortran library routines can be called using a facility that Julia has built-in. Other languages, such as [24]Python or [25]R , can also interoperate easily with Julia using external packages.
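
That built-in facility is the ccall syntax; as a hypothetical one-liner, calling the C library's strlen directly from the REPL looks like this: julia ccall(:strlen, Csize_t, (Cstring,), "Julia")

0x0000000000000005

The first argument names the C function (optionally paired with a library name), followed by the return type, a tuple of argument types, and then the arguments themselves.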

Documentation

There are many resources to turn to when learning the language. There is an extensive and detailed [26]manual at Julia headquarters, and this may be a good place to start. However, although the first few chapters provide a gentle introduction, the material soon becomes dense and, at times, hard to follow, with references to concepts that are not explained until later chapters. Fortunately, there is a [27]"learning" link at the top of the Julia home page, which takes you to a long list of videos, tutorials, books, articles, and classes both about Julia and that use Julia in teaching subjects such as numerical analysis. There is also a fairly good [28]cheat-sheet [PDF] , which was just updated for v. 1.0.

If you're coming from Python, [29]this list of noteworthy differences between Python and Julia syntax will probably be useful.

Some of the linked tutorials are in the form of [30]Jupyter notebooks — indeed, the name "Jupyter" is formed from "Julia", "Python", and "R", which are the three original languages supported by the interface. The [31]Julia kernel for Jupyter was recently upgraded to support v. 1.0. Judicious sampling of a variety of documentation sources, combined with liberal experimentation, may be the best way of learning the language. Jupyter makes this experimentation more inviting for those who enjoy the web-based interface, but the REPL that comes with Julia helps a great deal in this regard by providing, for instance, TAB completion and an extensive help system invoked by simply pressing the "?" key.

Stay tuned

The [32]next installment in this two-part series will explain how Julia is organized around the concept of "multiple dispatch". You will learn how to create functions and make elementary use of Julia's type system. We'll see how to install packages and use modules, and how to make graphs. Finally, Part 2 will briefly survey the important topics of macros and distributed computing.

[33]Comments (80 posted)

[34]C considered dangerous

By Jake Edge

August 29, 2018

[35]LSS NA

At the North America edition of the [36]2018 Linux Security Summit (LSS NA), which was held in late August in Vancouver, Canada, Kees Cook gave a presentation on some of the dangers that come with programs written in C. In particular, of course, the Linux kernel is mostly written in C, which means that the security of our systems rests on a somewhat dangerous foundation. But there are things that can be done to help firm things up by "Making C Less Dangerous", as the title of his talk suggested.

He began with a brief summary of the work that he and others are doing as part of the [37]Kernel Self Protection Project (KSPP). The goal of the project is to get kernel protections merged into the mainline. These protections are not targeted at protecting user-space processes from other (possibly rogue) processes, but are, instead, focused on protecting the kernel from user-space code. There are around 12 organizations and ten individuals working on roughly 20 different technologies as part of the KSPP, he said. The progress has been "slow and steady", he said, which is how he thinks it should go. [38]

One of the main problems is that C is treated mostly like a fancy assembler. The kernel developers do this because they want the kernel to be as fast and as small as possible. There are other reasons, too, such as the need to do architecture-specific tasks that lack a C API (e.g. setting up page tables, switching to 64-bit mode).

But there is lots of undefined behavior in C. This "operational baggage" can lead to various problems. In addition, C has a weak standard library with multiple utility functions that have various pitfalls. In C, the content of uninitialized automatic variables is undefined, but in the machine code that it gets translated to, the value is whatever happened to be in that memory location before. In C, a function pointer can be called even if the type of the pointer does not match the type of the function being called—assembly doesn't care, it just jumps to a location, he said.
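
As a minimal, hypothetical illustration of those two points (this is not code from the talk), the following user-space fragment reads an uninitialized local variable and calls through a mismatched function-pointer type; both compile, and the second simply jumps to whatever address the pointer holds:

    #include <stdio.h>

    static long add_one(long x) { return x + 1; }

    int main(void)
    {
        int count;                          /* never initialized: the value is
                                               whatever was left in that stack slot */
        printf("count = %d\n", count);      /* undefined behavior in C */

        /* The cast silences the compiler; the generated code just jumps to
           the address, regardless of the real signature of add_one(). */
        int (*fp)(int) = (int (*)(int))add_one;
        printf("fp(41) = %d\n", fp(41));
        return 0;
    }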

The APIs in the standard library are also bad in many cases. He asked: why is there no argument to memcpy() to specify the maximum destination length? He noted a recent [39]blog post from Raph Levien entitled "With Undefined Behavior, Anything is Possible". That obviously resonated with Cook, as he pointed out his T-shirt—with the title and artwork from the post.

Less danger

He then moved on to some things that kernel developers can do (and are doing) to get away from some of the dangers of C. He began with variable-length arrays (VLAs), which can be used to overflow the stack to access data outside of its region. Even if the stack has a guard page, VLAs can be used to jump past it to write into other memory, which can then be used by some other kind of attack. The C language is "perfectly fine with this". It is easy to find uses of VLAs with the -Wvla flag, however.
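
As a hedged sketch of what such a conversion looks like (the function and buffer sizes here are invented for illustration, not taken from the kernel patch), the first version below declares a VLA that "gcc -Wvla" will flag, while the second replaces it with a fixed-size buffer and an explicit length check:

    #include <stddef.h>
    #include <string.h>

    /* VLA version: the stack usage is chosen at run time by the caller. */
    static void handle_request(size_t len)
    {
        unsigned char buf[len];             /* flagged by -Wvla */
        memset(buf, 0, sizeof(buf));
        /* ... */
    }

    /* Fixed-size version, as preferred by the kernel work described above. */
    #define BUF_MAX 256
    static int handle_request_fixed(size_t len)
    {
        unsigned char buf[BUF_MAX];

        if (len > sizeof(buf))
            return -1;                      /* reject oversized requests */
        memset(buf, 0, len);
        /* ... */
        return 0;
    }

    int main(void)
    {
        handle_request(16);
        return handle_request_fixed(16);
    }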

But it turns out that VLAs are [40]not just bad from a security perspective, they are also slow. In a micro-benchmark associated with a [41]patch removing a VLA, a 13% performance boost came from using a fixed-size array. He dug in a bit further and found that much more code is generated to handle a VLA, which explains the speed difference. Linus Torvalds has [42]declared that VLAs should be removed from the kernel because they cause security problems and also slow the kernel down, so Cook's advice was simple: "don't use VLAs".

Another problem area is switch statements, in particular where there is no break for a case. That could mean that the programmer expects and wants to fall through to the next case, or it could be that the break was simply forgotten. There is a way to get a warning from the compiler for fall-throughs, but there needs to be a way to mark those that are truly meant to be that way. A special fall-through "statement" in the form of a comment is what has been agreed on within the static-analysis community. He and others have been going through each of the places where there is no break to add these comments (or a break); they have "found a lot of bugs this way", he said.
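
A small, self-contained sketch of that convention (the enum and functions are made up for the example): with GCC's -Wimplicit-fallthrough warning enabled, an unannotated missing break is reported, while the comment below marks the fall-through as intentional:

    #include <stdio.h>

    enum state { START, RUNNING, STOPPED };

    static void step(enum state s)
    {
        switch (s) {
        case START:
            printf("initializing\n");
            /* fall through */
        case RUNNING:
            printf("working\n");
            break;
        default:
            printf("stopped\n");
            break;
        }
    }

    int main(void)
    {
        step(START);                        /* prints both lines, as intended */
        step(STOPPED);
        return 0;
    }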

Uninitialized local variables will generate a warning, but not if the variable is passed in by reference. There are some GCC plugins that will automatically initialize these variables, but there are also patches for both GCC and Clang to provide a compiler option to do so. Neither of those is upstream yet, but Torvalds has praised the effort, so the kernel would likely use the option. An interesting side effect that came about while investigating this was a warning he got about unreachable code when he enabled the auto-initialization. There were two variables declared just after a switch (and outside of any case), where they would never be reached.
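
The by-reference case can be sketched like this (hypothetical code, not from the talk): because the variable's address is passed to another function, the compiler assumes it may be initialized there and stays silent, even though one code path leaves it untouched:

    #include <stdio.h>

    /* Fills *out only when flag is non-zero; otherwise leaves it untouched. */
    static int maybe_fill(int *out, int flag)
    {
        if (flag) {
            *out = 42;
            return 0;
        }
        return -1;
    }

    int main(void)
    {
        int value;                          /* no warning: passed by reference below */

        maybe_fill(&value, 0);
        printf("value = %d\n", value);      /* reads leftover stack contents */
        return 0;
    }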

Arithmetic overflow is another undefined behavior in C that can cause various problems. GCC can check for signed overflow, which performs well (the overhead is in the noise, he said), but adding warning messages for it does grow the kernel by 6%; making the overflow abort, instead, only adds 0.1%. Clang can check for both signed and unsigned overflow; signed overflow is undefined, while unsigned overflow is defined, but often unexpected. Marking places where unsigned overflow is expected is needed; it would be nice to get those annotations put into the kernel, Cook said.
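
As a rough sketch of what explicit overflow checking looks like in practice (using the __builtin_add_overflow() builtin available in GCC 5 and later and in Clang, rather than any kernel-specific helper):

    #include <stdio.h>
    #include <limits.h>

    int main(void)
    {
        int a = INT_MAX, sum;
        unsigned int ua = UINT_MAX, usum;

        /* Signed overflow is undefined in C, so it has to be detected before
           it happens; the builtin reports it through its return value. */
        if (__builtin_add_overflow(a, 1, &sum))
            printf("signed addition would overflow\n");

        /* Unsigned overflow is defined (it wraps), but often unexpected;
           the same builtin can flag it. */
        if (__builtin_add_overflow(ua, 1u, &usum))
            printf("unsigned addition wrapped\n");

        return 0;
    }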

Explicit bounds checking is expensive. Doing it for copy_{to,from}_user() is a less than 1% performance hit, but adding it to the strcpy() and memcpy() families is around a 2% hit. Pre-Meltdown, that kind of performance regression for a security feature would have been out of the question, he said; post-Meltdown, since it is less than 5%, maybe there is a chance to add this checking.

Better APIs would help as well. He pointed to the evolution of strcpy(), through strncpy() and strlcpy() (each with their own bounds flaws) to strscpy(), which seems to be "OK so far". He also mentioned memcpy() again as a poor API with respect to bounds checking.
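
strscpy() itself is a kernel function, but its contract can be sketched in user space roughly as follows (strscpy_like() is a hypothetical stand-in; the kernel version returns -E2BIG on truncation, while -1 is used here to keep the example standalone):

    #include <stdio.h>
    #include <stddef.h>
    #include <sys/types.h>

    /* Copy at most size-1 characters, always NUL-terminate, and tell the
       caller whether the source had to be truncated. */
    static ssize_t strscpy_like(char *dst, const char *src, size_t size)
    {
        size_t i;

        if (size == 0)
            return -1;
        for (i = 0; i < size - 1 && src[i] != '\0'; i++)
            dst[i] = src[i];
        dst[i] = '\0';
        return src[i] == '\0' ? (ssize_t)i : -1;    /* -1: truncated */
    }

    int main(void)
    {
        char buf[8];

        if (strscpy_like(buf, "far too long for the buffer", sizeof(buf)) < 0)
            printf("truncated to \"%s\"\n", buf);
        return 0;
    }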

Hardware support for bounds checking is available in the application data integrity (ADI) feature for SPARC and is coming for Arm; it may also be available for Intel processors at some point. These all use a form of "memory tagging", where allocations get a tag that is stored in the high-order byte of the address. An offset from the address can be checked by the hardware to see if it still falls within the allocated region based on the tag.

Control-flow integrity (CFI) has become more of an issue lately because much of what attackers had used in the past has been marked as "no execute", so they are turning to existing code "gadgets" already present in the kernel by hijacking indirect function calls. In C, you can call through a function pointer without regard to its type, since the compiled code just treats the pointer as an address to jump to. Clang has a CFI-sanitize feature that enforces the function prototype to restrict the calls that can be made. It is done at runtime and is not perfect, in part because there are lots of functions in the kernel that take one unsigned long parameter and return an unsigned long.
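
A hypothetical user-space fragment shows the kind of call this is aimed at: the cast makes the mismatched indirect call look like legal C, and without CFI the program simply jumps to the address; built with Clang's CFI sanitizer (roughly clang -flto -fvisibility=hidden -fsanitize=cfi), the prototype check fails at run time instead:

    #include <stdio.h>

    static unsigned long helper(unsigned long x) { return x * 2; }

    typedef void (*callback_t)(void);

    int main(void)
    {
        /* The cast hides the type mismatch from the compiler... */
        callback_t cb = (callback_t)helper;

        /* ...and plain C performs the indirect call anyway.  With CFI
           enabled, the mismatch is detected and the program aborts here. */
        cb();

        printf("survived the indirect call\n");
        return 0;
    }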

Control-flow attacks have both a "forward edge", which is what CFI sanitize tries to handle, and a "backward edge" that comes from manipulating the stack values, the return address in particular. Clang has two methods available to prevent the stack manipulation. The first is the "safe stack", which puts various important items (e.g. "safe" variables, register spills, and the return address) on a separate stack. Alternatively, the "shadow stack" feature creates a separate stack just for return addresses.

One problem with these other stacks is that they are still writable, so if an attacker can find them in memory, they can still perform their attacks. Hardware-based protections, like Intel's Control-Flow Enforcement Technology (CET), [43]provide a read-only shadow call stack for return addresses. Another hardware protection is [44]pointer authentication for Arm, which adds a kind of encrypted tag to the return address that can be verified before it is used.

Status and challenges

Cook then went through the current status of handling these different problems in the kernel. VLAs are almost completely gone, he said, just a few remain in the crypto subsystem; he hopes those VLAs will be gone by 4.20 (or whatever the number of the next kernel release turns out to be). Once that happens, he plans to turn on -Wvla for the kernel build so that none creep back in.

There has been steady progress made on marking fall-through cases in switch statements. Only 745 remain to be handled of the 2311 that existed when this work started; each one requires scrutiny to determine what the author's intent is. Auto-initialized local variables can be done using compiler plugins, but that is "not quite what we want", he said. More compiler support would be helpful there. For arithmetic overflow, it would be nice to see GCC get support for the unsigned case, but memory allocations now do explicit overflow checking.

Bounds checking has seen some "crying about performance hits", so we are waiting impatiently for hardware support, he said. CFI forward-edge protection needs [45]link-time optimization (LTO) support for Clang in the kernel, but it is currently working on Android. For backward-edge mitigation, the Clang shadow call stack is working on Android, but we are impatiently waiting for hardware support for that too.

There are a number of challenges in doing security development for the kernel, Cook said. There are cultural boundaries due to conservatism within the kernel community; that requires patiently working and reworking features in order to get them upstream. There are, of course, technical challenges because of the complexity of security changes; those kinds of problems can be solved. There are also resource limitations in terms of developers, testers, reviewers, and so on. KSPP and the other kernel security developers are still making that "slow but steady" progress.

Cook's [46]slides [PDF] are available for interested readers; before long, there should be a video available of the talk as well.

[I would like to thank LWN's travel sponsor, the Linux Foundation, for travel assistance to attend the Linux Security Summit in Vancouver.]

[47]Comments (70 posted)

[48]The second half of the 4.19 merge window

By Jonathan Corbet

August 26, 2018

By the time Linus Torvalds [49]released 4.19-rc1 and closed the merge window for this development cycle, 12,317 non-merge changesets had found their way into the mainline; about 4,800 of those landed after [50]last week's summary was written. As tends to be the case late in the merge window, many of those changes were fixes for the bigger patches that went in early, but there were also a number of new features added. Some of the more significant changes include:

Core kernel

The full set of patches adding [51]control-group awareness to the out-of-memory killer has not been merged due to ongoing disagreements, but one piece of it has: there is a new memory.oom.group control knob that will cause all processes within a control group to be killed in an out-of-memory situation.

A new set of protections has been added to prevent an attacker from fooling a program into writing to an existing file or FIFO. An open with the O_CREAT flag to a file or FIFO in a world-writable, sticky directory (e.g. /tmp) will fail if the owner of the opening process is not the owner of either the target file or the containing directory. This behavior, disabled by default, is controlled by the new protected_regular and protected_fifos sysctl knobs.
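
A hypothetical sketch of the pattern this protection targets: a program "creates" a file in /tmp with O_CREAT but without O_EXCL, so if an attacker has already placed a file or FIFO at that path, the program would otherwise write to it. With the new knobs enabled and the pre-existing file owned by another user, the open fails instead (the file name here is invented for the example):

    #include <stdio.h>
    #include <string.h>
    #include <errno.h>
    #include <fcntl.h>
    #include <unistd.h>

    int main(void)
    {
        /* O_CREAT without O_EXCL: opens the file even if it already exists. */
        int fd = open("/tmp/hypothetical-report.txt",
                      O_CREAT | O_WRONLY, 0600);

        if (fd < 0) {
            fprintf(stderr, "open: %s\n", strerror(errno));
            return 1;
        }
        /* ... write the report ... */
        close(fd);
        return 0;
    }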

Filesystems and block layer

The dm-integrity device-mapper target can now use a separate device for metadata storage.

EROFS, the "enhanced read-only filesystem", has been added to the staging tree. It is "a lightweight read-only file system with modern designs (eg. page-sized blocks, inline xattrs/data, etc.) for scenarios which need high-performance read-only requirements, eg. firmwares in mobile phone or LIVECDs".

The new "metadata copy-up" feature in overlayfs will avoid copying a file's contents to the upper layer on a metadata-only change. See [52]this commit for details.

Hardware support

Graphics: Qualcomm Adreno A6xx GPUs.

Industrial I/O: Spreadtrum SC27xx series PMIC analog-to-digital converters, Analog Devices AD5758 digital-to-analog converters, Intersil ISL29501 time-of-flight sensors, Silicon Labs SI1133 UV index/ambient light sensor chips, and Bosch Sensortec BME680 sensors.

Miscellaneous: Generic ADC-based resistive touchscreens, Generic ASIC devices via the Google [53]Gasket framework, Analog Devices ADGS1408/ADGS1409 multiplexers, Actions Semi Owl SoCs DMA controllers, MEN 16Z069 watchdog timers, Rohm BU21029 touchscreen controllers, Cirrus Logic CS47L35, CS47L85, CS47L90, and CS47L91 codecs, Cougar 500k gaming keyboards, Qualcomm GENI-based I2C controllers, Actions Semiconductor Owl I2C controllers, ChromeOS EC-based USBPD chargers, and Analog Devices ADP5061 battery chargers.

USB: Nuvoton NPCM7XX on-chip EHCI USB controllers, Broadcom Stingray PCIe PHYs, and Renesas R-Car generation 3 PCIe PHYs.

There is also a new subsystem for the abstraction of GNSS (global navigation satellite systems — GPS, for example) receivers in the kernel. To date, such devices have been handled with an abundance of user-space drivers; the hope is to bring some order to this area. Support for u-blox and SiRFstar receivers has been added as well.

Kernel internal

The __deprecated marker, used to mark interfaces that should no longer be used, has been deprecated and removed from the kernel entirely. [54]Torvalds said: "They are not useful. They annoy everybody, and nobody ever does anything about them, because it's always 'somebody elses problem'. And when people start thinking that warnings are normal, they stop looking at them, and the real warnings that mean something go unnoticed."

The minimum version of GCC required by the kernel has been moved up to 4.6.

There are a couple of significant changes that failed to get in this time around, including the [55]XArray data structure. The patches are thought to be ready, but they had the bad luck to be based on a tree that failed to be merged for other reasons, so Torvalds [56]didn't even look at them . That, in turn, blocks another set of patches intended to enable migration of slab-allocated objects.

The other big deferral is the [57]new system-call API for filesystem mounting . Despite ongoing [58]concerns about what happens when the same low-level device is mounted multiple times with conflicting options, Al Viro sent [59]a pull request to send this work upstream. The ensuing discussion made it clear that there is still not a consensus in this area, though, so it seems that this work has to wait for another cycle.

Assuming all goes well, the kernel will stabilize over the coming weeks and the final 4.19 release will happen in mid-October.

[60]Comments (1 posted)

[61]Measuring (and fixing) I/O-controller throughput loss

August 29, 2018

This article was contributed by Paolo Valente

Many services, from web hosting and video streaming to cloud storage, need to move data to and from storage. They also often require that each per-client I/O flow be guaranteed a non-zero amount of bandwidth and a bounded latency. An expensive way to provide these guarantees is to over-provision storage resources, keeping each resource underutilized, and thus have plenty of bandwidth available for the few I/O flows dispatched to each medium. Alternatively one can use an I/O controller. Linux provides two mechanisms designed to throttle some I/O streams to allow others to meet their bandwidth and latency requirements. These mechanisms work, but they come at a cost: a loss of as much as 80% of total available I/O bandwidth. I have run some tests to demonstrate this problem; some upcoming improvements to the [62]bfq I/O scheduler promise to improve the situation considerably.

Throttling does guarantee control, even on drives that happen to be highly utilized but, as will be seen, it has a hard time actually ensuring that drives are highly utilized. Even with greedy I/O flows, throttling easily ends up utilizing as little as 20% of the available speed of a flash-based drive. Such a speed loss may be particularly problematic with lower-end storage. On the opposite end, it is also disappointing with high-end hardware, as the Linux block I/O stack itself has been [63]redesigned from the ground up to fully utilize the high speed of modern, fast storage. In addition, throttling fails to guarantee the expected bandwidths if I/O contains both reads and writes, or is sporadic in nature.

On the bright side, there now seems to be an effective alternative for controlling I/O: the proportional-share policy provided by the bfq I/O scheduler. It enables nearly 100% storage bandwidth utilization, at least with some of the workloads that are problematic for throttling. An upcoming version of bfq may be able to achieve this result with almost all workloads. Finally, bfq guarantees bandwidths with all workloads. The current limitation of bfq is that its execution overhead becomes significant at speeds above 400,000 I/O operations per second on commodity CPUs.

Using the bfq I/O scheduler, Linux can now guarantee low latency to lightweight flows containing sporadic, short I/O. No throughput issues arise, and no configuration is required. This capability benefits important, time-sensitive tasks, such as video or audio streaming, as well as executing commands or starting applications. Although benchmarks are not available yet, these guarantees might also be provided by the newly proposed [64]I/O latency controller . It allows administrators to set target latencies for I/O requests originating from each group of processes, and favors the groups with the lowest target latency.

The testbed

I ran the tests with an ext4 filesystem mounted on a PLEXTOR PX-256M5S SSD, which features a peak rate of ~160MB/s with random I/O, and of ~500MB/s with sequential I/O. I used blk-mq, in Linux 4.18. The system was equipped with a 2.4GHz Intel Core i7-2760QM CPU and 1.3GHz DDR3 DRAM. In such a system, a single thread doing synchronous reads reaches a throughput of 23MB/s.

For the purposes of these tests, each process is considered to be in one of two groups, termed "targets" and "interferers". A target is a single-process, I/O-bound group whose I/O is the focus of the measurements. In particular, I measure the I/O throughput enjoyed by this group to get the minimum bandwidth delivered to the group. An interferer is a single-process group whose role is to generate additional I/O that interferes with the I/O of the target. The tested workloads contain one target and multiple interferers.

The single process in each group either reads or writes, through asynchronous (buffered) operations, to one file — different from the file read or written by any other process — after invalidating the buffer cache for the file. I define a reader or writer process as either "random" or "sequential", depending on whether it reads or writes its file at random positions or sequentially. Finally, an interferer is defined as being either "active" or "inactive" depending on whether it performs I/O during the test. When an interferer is mentioned, it is assumed that the interferer is active.

Workloads are defined so as to try to cover the combinations that, I believe, most influence the performance of the storage device and of the I/O policies. For brevity, in this article I show results for only two groups of workloads:

Static sequential : four synchronous sequential readers or four asynchronous sequential writers, plus five inactive interferers.

Static random : four synchronous random readers, all with a block size equal to 4k, plus five inactive interferers.

To create each workload, I considered, for each mix of interferers in the group, two possibilities for the target: it could be either a random or a sequential synchronous reader. In [65]a longer version of this article [PDF] , you will also find results for workloads with varying degrees of I/O randomness, and for dynamic workloads (containing sporadic I/O sources). These extra results confirm the losses of throughput and I/O control for throttling that are shown here.

I/O policies

Linux provides two I/O-control mechanisms for guaranteeing (a minimum) bandwidth, or at least fairness, to long-lived flows: the throttling and proportional-share I/O policies. With throttling, one can set a maximum bandwidth limit — "max limit" for brevity — for the I/O of each group. Max limits can be used, in an indirect way, to provide the service guarantee at the focus of this article. For example, a group can be guaranteed a minimum bandwidth by limiting the maximum bandwidth of all the other groups.

Unfortunately, max limits have two drawbacks in terms of throughput. First, if some groups do not use their allocated bandwidth, that bandwidth cannot be reclaimed by other active groups. Second, limits must comply with the worst-case speed of the device, namely, its random-I/O peak rate. Such limits will clearly leave a lot of throughput unused with workloads that otherwise would drive the device to higher throughput levels. Maximizing throughput is simply not a goal of max limits. So, for brevity, test results with max limits are not shown here. You can find these results, plus a more detailed description of the above drawbacks, in the long version of this article.

Because of these drawbacks, a new, still experimental, low limit has been added to the throttling policy. If a group is assigned a low limit, then the throttling policy automatically limits the I/O of the other groups in such a way as to guarantee to the group a minimum bandwidth equal to its assigned low limit. This new throttling mechanism throttles no group as long as every group is getting at least its assigned minimum bandwidth. I tested this mechanism, but did not consider the interesting problem of guaranteeing minimum bandwidths while, at the same time, enforcing maximum bandwidths.

The other I/O policy available in Linux, proportional share, provides weighted fairness. Each group is assigned a weight and should receive a portion of the total throughput proportional to that weight. This scheme guarantees minimum bandwidths in the same way that low limits do in throttling. In particular, it guarantees to each group a minimum bandwidth equal to the ratio between the weight of the group and the sum of the weights of all the groups that may be active at the same time.
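
Written out as a formula (this restates the description above rather than quoting it), the minimum bandwidth guaranteed to a group i with weight w_i, where B is the total throughput delivered by the drive and the sum runs over all groups that may be active at the same time, is:

    B_i^{min} = \frac{w_i}{\sum_j w_j} \, B

With the prop-bfq weights shown in the table below (target weight 300 out of a total of 1700), the target is therefore guaranteed at least 300/1700, or roughly 18%, of the total throughput when every group is active.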

The actual implementation of the proportional-share policy, on a given drive, depends on what flavor of the block layer is in use for that drive. If the drive is using the legacy block interface, the policy is implemented by the cfq I/O scheduler. Unfortunately, cfq fails to control bandwidths with flash-based storage, especially on drives featuring command queueing. This case is not considered in these tests. With drives using the multiqueue interface, proportional share is implemented by bfq. This is the combination considered in the tests.

To benchmark both throttling (low limits) and proportional share, I tested, for each workload, the combinations of I/O policies and I/O schedulers reported in the table below. In the end, there are three test cases for each workload. In addition, for some workloads, I considered two versions of bfq for the proportional-share policy.

Name       I/O policy                   Scheduler   Target       Each of the 4 active    Each of the 5 inactive    Sum of
                                                    parameter    interferers             interferers               parameters
low-none   Throttling with low limits   none        10MB/s       10MB/s (tot: 40)        20MB/s (tot: 100)         150MB/s
prop-bfq   Proportional share           bfq         300          100 (tot: 400)          200 (tot: 1000)           1700

For low limits, I report results with only none as the I/O scheduler, because the results are the same with kyber and mq-deadline.

The capabilities of the storage medium and of low limits drove the policy configurations. In particular:

The configuration of the target and of the active interferers for low-none is the one for which low-none provides its best possible minimum-bandwidth guarantee to the target: 10MB/s, guaranteed if all interferers are readers. Results remain the same regardless of the values used for target latency and idle time; I set them to 100µs and 1000µs, respectively, for every group.

Low limits for inactive interferers are set to twice the limits for active interferers, to pose greater difficulties to the policy.

I chose weights for prop-bfq so as to guarantee about the same minimum bandwidth as low-none to the target, in the same only-reader worst case as for low-none, and to preserve, between the weights of active and inactive interferers, the same ratio as between the low limits of active and inactive interferers.

Full details on configurations can be found in the long version of this article.

Each workload was run ten times for each policy, plus ten times without any I/O control, i.e., with none as I/O scheduler and no I/O policy in use. For each run, I measured the I/O throughput of the target (which reveals the bandwidth provided to the target), the cumulative I/O throughput of the interferers, and the total I/O throughput. These quantities fluctuated very little during each run, as well as across different runs. Thus in the graphs I report only averages over per-run average throughputs. In particular, for the case of no I/O control, I report only the total I/O throughput, to give an idea of the throughput that can be reached without imposing any control.

Results

This plot shows throughput results for the simplest group of workloads: the static-sequential set.

With a random reader as the target against sequential readers as interferers, low-none does guarantee the configured low limit to the target. Yet it reaches only a low total throughput. The throughput of the random reader evidently oscillates around 10MB/s during the test. This implies that it is at least slightly below 10MB/s for a significant percentage of the time. But when this happens, the low-limit mechanism limits the maximum bandwidth of every active group to the low limit set for the group, i.e., to just 10MB/s. The end result is a total throughput lower than 10% of the throughput reached without I/O control.

That said, the high throughput achieved without I/O control is obtained by choking the random I/O of the target in favor of the sequential I/O of the interferers. Thus, it is probably more interesting to compare low-none throughput with the throughput reachable while actually guaranteeing 10MB/s to the target. The target is a single, synchronous, random reader, which reaches 23MB/s while active. So, to guarantee 10MB/s to the target, it is enough to serve it for about half of the time, and the interferers for the other half. Since the device reaches ~500MB/s with the sequential I/O of the interferers, the resulting throughput with this service scheme would be (500+23)/2, or about 260MB/s. low-none thus reaches less than 20% of the total throughput that could be reached while still preserving the target bandwidth.

prop-bfq provides the target with a slightly higher throughput than low-none. This makes it harder for prop-bfq to reach a high total throughput, because prop-bfq serves more random I/O (from the target) than low-none. Nevertheless, prop-bfq gets a much higher total throughput than low-none. According to the above estimate, this throughput is about 90% of the maximum throughput that could be reached, for this workload, without violating service guarantees. The reason for this good result is that bfq provides an effective implementation of the proportional-share service policy. At any time, each active group is granted a fraction of the current total throughput, and the sum of these fractions is equal to one; so group bandwidths naturally saturate the available total throughput at all times.

Things change with the second workload: a random reader against sequential writers. Now low-none reaches a much higher total throughput than prop-bfq. low-none serves much more sequential (write) I/O than prop-bfq because writes somehow break the low-limit mechanisms and prevail over the reads of the target. Conceivably, this happens because writes tend to both starve reads in the OS (mainly by eating all available I/O tags) and to cheat on their completion time in the drive. In contrast, bfq is intentionally configured to privilege reads, to counter these issues.

In particular, low-none gets an even higher throughput than no I/O control at all because it penalizes the random I/O of the target even more than the no-controller configuration.

Finally, with the last two workloads, prop-bfq reaches even higher total throughput than with the first two. It happens because the target also does sequential I/O, and serving sequential I/O is much more beneficial for throughput than serving random I/O. With these two workloads, the total throughput is, respectively, close to or much higher than that reached without I/O control. For the last workload, the total throughput is much higher because, differently from none, bfq privileges reads over asynchronous writes, and reads yield a higher throughput than writes. In contrast, low-none still gets lower or much lower throughput than prop-bfq, because of the same issues that hinder low-none throughput with the first two workloads.

As for bandwidth guarantees, with readers as interferers (third workload), prop-bfq, as expected, gives the target a fraction of the total throughput proportional to its weight. bfq approximates perfect proportional-share bandwidth distribution among groups doing I/O of the same type (reads or writes) and with the same locality (sequential or random). With the last workload, prop-bfq gives much more throughput to the reader than to all the interferers, because interferers are asynchronous writers, and bfq privileges reads.

The second group of workloads (static random) is the one, among all the workloads considered, for which prop-bfq performs worst. Results are shown below:

This chart reports results not only for mainline bfq, but also for an improved version of bfq which is currently under public testing. As can be seen, with only random readers, prop-bfq reaches a much lower total throughput than low-none. This happens because of the Achilles heel of the bfq I/O scheduler. If the process in service does synchronous I/O and has a higher weight than some other process, then, to give strong bandwidth guarantees to that process, bfq plugs I/O dispatching every time the process temporarily stops issuing I/O requests. In this respect, processes actually have differentiated weights and do synchronous I/O in the workloads tested. So bfq systematically performs I/O plugging for them. Unfortunately, this plugging empties the internal queues of the drive, which kills throughput with random I/O. And the I/O of all processes in these workloads is also random.

The situation reverses with a sequential reader as target. Yet, the most interesting results come from the new version of bfq, containing small changes to counter exactly the above weakness. This version recovers most of the throughput loss with the workload made of only random I/O and more; with the second workload, where the target is a sequential reader, it reaches about 3.7 times the total throughput of low-none.

When the main concern is the latency of flows containing short I/O, Linux now performs rather well, thanks to the bfq I/O scheduler and the I/O latency controller. But if the requirement is to provide explicit bandwidth guarantees (or just fairness) to I/O flows, then one must be ready to give up much or most of the speed of the storage media. bfq helps with some workloads, but loses most of the throughput with workloads consisting of mostly random I/O. Fortunately, there is apparently hope for much better performance since an improvement, still under development, seems to enable bfq to reach a high throughput with all workloads tested so far.

[I wish to thank Vivek Goyal for enabling me to make this article much more fair and sound.]

[66]Comments (4 posted)

[67]KDE's onboarding initiative, one year later

August 24, 2018

This article was contributed by Marta Rybczyńska

[68]Akademy

In 2017, the KDE community decided on [69]three goals to concentrate on for the next few years. One of them was [70]streamlining the onboarding of new contributors (the others were [71]improving usability and [72]privacy). During [73]Akademy, the yearly KDE conference that was held in Vienna in August, Neofytos Kolokotronis shared the status of the onboarding goal, the work done during the last year, and further plans. While it is a complicated process in a project as big and diverse as KDE, numerous improvements have already been made.

Two of the three KDE community goals were proposed by relative newcomers. Kolokotronis was one of those, having joined the [74]KDE Promo team not long before proposing the focus on onboarding. He had previously been involved with [75]Chakra Linux , a distribution based on KDE software. The fact that new members of the community proposed strategic goals was also noted in the [76]Sunday keynote by Claudia Garad .

Proper onboarding adds excitement to the contribution process and increases retention, he explained. When we look at [77]the definition of onboarding , it is a process in which the new contributors acquire knowledge, skills, and behaviors so that they can contribute effectively. Kolokotronis proposed to see it also as socialization: integration into the project's relationships, culture, structure, and procedures.

The gains from proper onboarding are many. The project can grow by attracting new blood with new perspectives and solutions. The community maintains its health and stays vibrant. Another important advantage of efficient onboarding is that replacing current contributors becomes easier when they change interests, jobs, or leave the project for whatever reason. Finally, successful onboarding adds new advocates to the project.

Achievements so far and future plans

The team started with ideas for a centralized onboarding process for the whole of KDE. They found out quickly that this would not work because KDE is "very decentralized", so it is hard to provide tools and procedures that are going to work for the whole project. According to Kolokotronis, other characteristics of KDE that impact onboarding are high diversity, remote and online teams, and hundreds of contributors in dozens of projects and teams. In addition, new contributors already know in which area they want to take part and they prefer specific information that will be directly useful for them.

So the team changed its approach; several changes have since been proposed and implemented. The [78]Get Involved page, which is expected to be one of the resources new contributors read first, has been rewritten. For the [79]Junior Jobs page, the team is [80] [81]discussing what the generic content for KDE as a whole should be. The team simplified [82]Phabricator registration, which also led to the process being better documented. Another part of the work concerns the [83]KDE Bugzilla; it includes, for example, initiatives to limit the number of states of a ticket or to remove obsolete products.

The [84]Plasma Mobile team is heavily involved in the onboarding goal. The Plasma Mobile developers have simplified their development environment setup and created an [85]interactive "Get Involved" page. In addition, the Plasma team changed the way task descriptions are written; they now contain more detail, so that it is easier to get involved. The basic description should be short and clear, and it should include details of the problem and possible solutions. The developers try to share the list of skills necessary to fulfill the tasks and include clear links to the technical resources needed.

Kolokotronis and team also identified a new potential source of contributors for KDE: distributions using KDE. They have the advantage of already knowing and using the software. The next idea the team is working on is to make sure that setting up a development environment is easy. The team plans to work on this during a dedicated sprint this autumn.

Searching for new contributors

Kolokotronis plans to search for new contributors at the periphery of the project, among the "skilled enthusiasts": loyal users who actually care about the project. They "can make wonders", he said. Those individuals may also be less confident or shy, have trouble making the first step, and need guidance. The project leaders should take that into account.

In addition, newcomers are all different. Kolokotronis provided a long list of how contributors differ, including skills and knowledge, motives and interests, and time and dedication. His advice is to "try to find their superpower", the skills they have that are missing in the team. Those "superpowers" can then be used for the benefit of the project.

If a project does nothing else, he said, it can start with its documentation. However, this does not only mean code documentation. Writing down the procedures or information about the internal workings of the project, like who is working on what, is an important part of a project's documentation and helps newcomers. There should also be guidelines on how to start, especially on setting up the development environment.

The first thing the project leaders should do, according to Kolokotronis, is to spend time on introducing newcomers to the project. Ideally every new contributor should be assigned mentors — more experienced members who can help them when needed. The mentors and project leaders should find tasks that are interesting for each person. Answering an audience question on suggestions for shy new contributors, he recommended even more mentoring. It is also very helpful to make sure that newcomers have enough to read, but "avoid RTFM", he highlighted. It is also easy for a new contributor "to fly away", he said. The solution is to keep requesting things and be proactive.

What can the project do?

Kolokotronis suggested a number of actions for a project when it wants to improve its onboarding. The first step is preparation: the project leaders should know the team's and the project's needs. Long-term planning is important, too. It is not enough to wait for contributors to come — the project should be proactive, which means reaching out to candidates, suggesting appropriate tasks and, finally, making people available for the newcomers if they need help.

This leads to the next step: to be a mentor. Kolokotronis suggests being a "great host", but also trying to phase out the dependency on the mentor rapidly. "We have been all newcomers", he said. It can be intimidating to join an existing group. Onboarding creates a sense of belonging which, in turn, increases retention.

The last step proposed was to be strategic. This includes thinking about the emotions you want newcomers to feel. Kolokotronis explained the strategic part with an example. The overall goal is (surprise!) to improve the onboarding of new contributors. An intermediate objective might be to keep the newcomers after they have made their first commit. If your strategy is to keep them confident and proud, you can use different tactics like public praise and acknowledgment of their work. Another useful tactic may be assigning simple tasks, matched to the skill of the contributor.

To summarize, the most important thing, according to Kolokotronis, is to respond quickly and spend time with new contributors. This time should be used to explain procedures and to introduce the people and the culture. It is also essential to guide first contributions and to praise contributors' skill and effort. Increase the difficulty of tasks over time to keep contributors motivated and challenged. And finally, he said, "turn them into mentors".

Kolokotronis acknowledges that onboarding "takes time" and "everyone complains" about it. However, he is convinced that it is beneficial in the long term and that it decreases developer turnover.

Advice to newcomers

Kolokotronis concluded with some suggestions for newcomers to a project. They should try to be persistent and to not get discouraged when something goes wrong. Building connections from the very beginning is helpful. He suggests asking questions as if you were already a member "and things will be fine". However, accept criticism if it happens.

One of the next actions of the onboarding team will be to collect feedback from newcomers and experienced contributors to see if they agree on the ideas and processes introduced so far.

[86]Comments (none posted)

[87]Sharing and archiving data sets with Dat

August 27, 2018

This article was contributed by Antoine Beaupré

[88]Dat is a new peer-to-peer protocol that uses some of the concepts of [89]BitTorrent and Git. Dat primarily targets researchers and open-data activists as it is a great tool for sharing, archiving, and cataloging large data sets. But it can also be used to implement decentralized web applications in a novel way.

Dat quick primer

Dat is written in JavaScript, so it can be installed with npm , but there are [90]standalone binary builds and a [91]desktop application (as an AppImage). An [92]online viewer can be used to inspect data for those who do not want to install arbitrary binaries on their computers.

The command-line application allows basic operations like downloading existing data sets and sharing your own. Dat uses a 32-byte [93]ed25519 public key, written as a 64-character hex string, which is used to discover and find content on the net. For example, this will download some sample data: $ dat clone \

dat://778f8d955175c92e4ced5e4f5563f69bfec0c86cc6f670352c457943666fe639 \

~/Downloads/dat-demo

Similarly, the share command is used to share content. It indexes the files in a given directory and creates a new unique address like the one above. The share command starts a server that uses multiple discovery mechanisms (currently, the [94]Mainline Distributed Hash Table (DHT), a [95]custom DNS server , and multicast DNS) to announce the content to its peers. This is how another user, armed with that public key, can download that content with dat clone or mirror the files continuously with dat sync .

So far, this looks a lot like BitTorrent [96]magnet links updated with 21st century cryptography. But Dat adds revisions on top of that, so modifications are automatically shared through the swarm. That is important for public data sets as those are often dynamic in nature. Revisions also make it possible to use [97]Dat as a backup system by saving the data incrementally using an [98]archiver .

While Dat is designed to work on larger data sets, processing them for sharing may take a while. For example, sharing the Linux kernel source code required about five minutes as Dat worked on indexing all of the files. This is comparable to the performance offered by [99]IPFS and BitTorrent. Data sets with more or larger files may take quite a bit more time.

One advantage that Dat has over IPFS is that it doesn't duplicate the data. When IPFS imports new data, it duplicates the files into ~/.ipfs . For collections of small files like the kernel, this is not a huge problem, but for larger files like videos or music, it's a significant limitation. IPFS eventually implemented a solution to this [100]problem in the form of the experimental [101]filestore feature , but it's not enabled by default. Even with that feature enabled, though, changes to data sets are not automatically tracked. In comparison, Dat operation on dynamic data feels much lighter. The downside is that each set needs its own dat share process.

Like any peer-to-peer system, Dat needs at least one peer to stay online to offer the content, which is impractical for mobile devices. Hosting providers like [102]Hashbase (which is a [103]pinning service in Dat jargon) can help users keep content online without running their own [104]server . The closest parallel in the traditional web ecosystem would probably be content distribution networks (CDN) although pinning services are not necessarily geographically distributed and a CDN does not necessarily retain a complete copy of a website. [105]

A web browser called [106]Beaker , based on the [107]Electron framework, can access Dat content natively without going through a pinning service. Furthermore, Beaker is essential to get any of the [108]Dat applications working, as they fundamentally rely on dat:// URLs to do their magic. This means that Dat applications won't work for most users unless they install that special web browser. There is a [109]Firefox extension called " [110]dat-fox " for people who don't want to install yet another browser, but it requires installing a [111]helper program . The extension will be able to load dat:// URLs but many applications will still not work. For example, the [112]photo gallery application completely fails with dat-fox.

Dat-based applications look promising from a privacy point of view. Because of its peer-to-peer nature, users regain control over where their data is stored: either on their own computer, an online server, or by a trusted third party. But considering the protocol is not well established in current web browsers, I foresee difficulties in adoption of that aspect of the Dat ecosystem. Beyond that, it is rather disappointing that Dat applications cannot run natively in a web browser given that JavaScript is designed exactly for that.

Dat privacy

An advantage Dat has over other peer-to-peer protocols like BitTorrent is end-to-end encryption. I was originally concerned by the encryption design when reading the [113]academic paper [PDF] :

It is up to client programs to make design decisions around which discovery networks they trust. For example if a Dat client decides to use the BitTorrent DHT to discover peers, and they are searching for a publicly shared Dat key (e.g. a key cited publicly in a published scientific paper) with known contents, then because of the privacy design of the BitTorrent DHT it becomes public knowledge what key that client is searching for.

So in other words, to share a secret file with another user, the public key is transmitted over a secure side-channel, only to then leak during the discovery process. Fortunately, the public Dat key is not directly used during discovery as it is [114]hashed with BLAKE2B . Still, the security model of Dat assumes the public key is private, which is a rather counterintuitive concept that might upset cryptographers and confuse users who are frequently encouraged to type such strings in address bars and search engines as part of the Dat experience. There is a [115]security & privacy FAQ in the Dat documentation warning about this problem:

One of the key elements of Dat privacy is that the public key is never used in any discovery network. The public key is hashed, creating the discovery key. Whenever peers attempt to connect to each other, they use the discovery key.

Data is encrypted using the public key, so it is important that this key stays secure.

There are other privacy issues outlined in the document; it states that "Dat faces similar privacy risks as BitTorrent":

When you download a dataset, your IP address is exposed to the users sharing that dataset. This may lead to honeypot servers collecting IP addresses, as we've seen in Bittorrent. However, with dataset sharing we can create a web of trust model where specific institutions are trusted as primary sources for datasets, diminishing the sharing of IP addresses.

A Dat blog post refers to this issue as [116]reader privacy and it is, indeed, a sensitive issue in peer-to-peer networks. It is how BitTorrent users are discovered and served scary verbiage from lawyers, after all. But Dat makes this a little better because, to join a swarm, you must know what you are looking for already, which means peers who can look at swarm activity only include users who know the secret public key. This works well for secret content, but for larger, public data sets, it is a real problem; it is why the Dat project has [117]avoided creating a Wikipedia mirror so far.

I found another privacy issue that is not documented in the security FAQ during my review of the protocol. As mentioned earlier, the [118]Dat discovery protocol routinely phones home to DNS servers operated by the Dat project. This implies that the default discovery servers (and an attacker watching over their traffic) know who is publishing or seeking content, in essence discovering the "social network" behind Dat. This discovery mechanism can be disabled in clients, but a similar privacy issue applies to the DHT as well, although that is distributed so it doesn't require trust of the Dat project itself.

Considering those aspects of the protocol, privacy-conscious users will probably want to use Tor or other anonymization techniques to work around those concerns.

The future of Dat

[119]Dat 2.0 was released in June 2017 with performance improvements and protocol changes. [120]Dat Enhancement Proposals (DEPs) guide the project's future development; most work is currently geared toward implementing the draft " [121]multi-writer proposal " in [122]HyperDB . Without multi-writer support, only the original publisher of a Dat can modify it. According to Joe Hand, co-executive-director of [123]Code for Science & Society (CSS) and Dat core developer, in an IRC chat, "supporting multiwriter is a big requirement for lots of folks". For example, while Dat might allow Alice to share her research results with Bob, he cannot modify or contribute back to those results. The multi-writer extension allows for Alice to assign trust to Bob so he can have write access to the data.

Unfortunately, the current proposal doesn't solve the "hard problems" of "conflict merges and secure key distribution". The former will be worked out through user interface tweaks, but the latter is a classic problem that security projects typically have trouble finding solutions for—Dat is no exception. How will Alice securely trust Bob? The OpenPGP web of trust? Hexadecimal fingerprints read over the phone? Dat doesn't provide a magic solution to this problem.

Another thing limiting adoption is that Dat is not packaged in any distribution that I could find (although I [124]requested it in Debian ) and, considering the speed of change of the JavaScript ecosystem, this is unlikely to change any time soon. A [125]Rust implementation of the Dat protocol has started, however, which might be easier to package than the multitude of [126]Node.js modules. In terms of mobile device support, there is an experimental Android web browser with Dat support called [127]Bunsen , which somehow doesn't run on my phone. Some adventurous users have successfully run Dat in [128]Termux . I haven't found an app running on iOS at this point.

Even beyond platform support, distributed protocols like Dat have a tough slope to climb against the virtual monopoly of more centralized protocols, so it remains to be seen how popular those tools will be. Hand says Dat is supported by multiple non-profit organizations. Beyond CSS, [129]Blue Link Labs is working on the Beaker Browser as a self-funded startup and a grass-roots organization, [130]Digital Democracy, has contributed to the project. The [131]Internet Archive has [132]announced a collaboration between itself, CSS, and the California Digital Library to launch a pilot project to see "how members of a cooperative, decentralized network can leverage shared services to ensure data preservation while reducing storage costs and increasing replication counts".

Hand said adoption in academia has been "slow but steady" and that the [133]Dat in the Lab project has helped identify areas that could help researchers adopt the project. Unfortunately, as is the case with many free-software projects, he said that "our team is definitely a bit limited on bandwidth to push for bigger adoption". Hand said that the project received a grant from [134]Mozilla Open Source Support to improve its documentation, which will be a big help.

Ultimately, Dat suffers from a problem common to all peer-to-peer applications, which is naming. Dat addresses are not exactly intuitive: humans do not remember strings of 64 hexadecimal characters well. For this, Dat took a [135]similar approach to IPFS by using DNS TXT records and /.well-known URL paths to bridge existing, human-readable names with Dat hashes. So this sacrifices a part of the decentralized nature of the project in favor of usability.

I have tested a lot of distributed protocols like Dat in the past and I am not sure Dat is a clear winner. It certainly has advantages over IPFS in terms of usability and resource usage, but the lack of packages on most platforms is a big limit to adoption for most people. This means it will be difficult to share content with my friends and family with Dat anytime soon, which would probably be my primary use case for the project. Until the protocol reaches the wider adoption that BitTorrent has seen in terms of platform support, I will probably wait before switching everything over to this promising project.

[136]Comments (11 posted)

Page editor : Jonathan Corbet

Inside this week's LWN.net Weekly Edition

[137]Briefs : OpenSSH 7.8; 4.19-rc1; Which stable?; Netdev 0x12; Bison 3.1; Quotes; ...

[138]Announcements : Newsletters; events; security updates; kernel patches; ... Next page : [139]Brief items>>



[1] https://lwn.net/Articles/763743/

[2] https://lwn.net/Articles/763626/

[3] https://lwn.net/Articles/763641/

[4] https://lwn.net/Articles/763106/

[5] https://lwn.net/Articles/763603/

[6] https://lwn.net/Articles/763175/

[7] https://lwn.net/Articles/763492/

[8] https://lwn.net/Articles/763254/

[9] https://lwn.net/Articles/763255/

[10] https://lwn.net/Articles/763743/#Comments

[11] https://lwn.net/Articles/763626/

[12] http://julialang.org/

[13] https://julialang.org/blog/2018/08/one-point-zero

[14] https://julialang.org/benchmarks/

[15] https://juliacomputing.com/

[16] https://en.wikipedia.org/wiki/Read%E2%80%93eval%E2%80%93print_loop

[17] http://llvm.org/

[18] http://www.3blue1brown.com/essence-of-linear-algebra-page/

[19] http://www.netlib.org/lapack/

[20] https://lwn.net/Articles/657157/

[21] https://julialang.org/publications/julia-fresh-approach-BEKS.pdf

[22] https://lwn.net/Articles/738915/

[23] https://pypy.org/

[24] https://github.com/JuliaPy/PyCall.jl

[25] https://github.com/JuliaInterop/RCall.jl

[26] https://docs.julialang.org/en/stable/

[27] https://julialang.org/learning/

[28] http://bogumilkaminski.pl/files/julia_express.pdf

[29] https://docs.julialang.org/en/stable/manual/noteworthy-differences/#Noteworthy-differences-from-Python-1

[30] https://lwn.net/Articles/746386/

[31] https://github.com/JuliaLang/IJulia.jl

[32] https://lwn.net/Articles/764001/

[33] https://lwn.net/Articles/763626/#Comments

[34] https://lwn.net/Articles/763641/

[35] https://lwn.net/Archives/ConferenceByYear/#2018-Linux_Security_Summit_NA

[36] https://events.linuxfoundation.org/events/linux-security-summit-north-america-2018/

[37] https://kernsec.org/wiki/index.php/Kernel_Self_Protection_Project

[38] https://lwn.net/Articles/763644/

[39] https://raphlinus.github.io/programming/rust/2018/08/17/undefined-behavior.html

[40] https://lwn.net/Articles/749064/

[41] https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=02361bc77888

[42] https://lore.kernel.org/lkml/CA+55aFzCG-zNmZwX4A2FQpadafLfEzK6CC=qPXydAacU1RqZWA@mail.gmail.com/T/#u

[43] https://lwn.net/Articles/758245/

[44] https://lwn.net/Articles/718888/

[45] https://lwn.net/Articles/744507/

[46] https://outflux.net/slides/2018/lss/danger.pdf

[47] https://lwn.net/Articles/763641/#Comments

[48] https://lwn.net/Articles/763106/

[49] https://lwn.net/Articles/763497/

[50] https://lwn.net/Articles/762566/

[51] https://lwn.net/Articles/761118/

[52] https://git.kernel.org/linus/d5791044d2e5749ef4de84161cec5532e2111540

[53] https://lwn.net/ml/linux-kernel/20180630000253.70103-1-sque@chromium.org/

[54] https://git.kernel.org/linus/771c035372a036f83353eef46dbb829780330234

[55] https://lwn.net/Articles/745073/

[56] https://lwn.net/ml/linux-kernel/CA+55aFxFjAmrFpwQmEHCthHOzgidCKnod+cNDEE+3Spu9o1s3w@mail.gmail.com/

[57] https://lwn.net/Articles/759499/

[58] https://lwn.net/Articles/762355/

[59] https://lwn.net/ml/linux-fsdevel/20180823223145.GK6515@ZenIV.linux.org.uk/

[60] https://lwn.net/Articles/763106/#Comments

[61] https://lwn.net/Articles/763603/

[62] https://lwn.net/Articles/601799/

[63] https://lwn.net/Articles/552904

[64] https://lwn.net/Articles/758963/

[65] http://algogroup.unimore.it/people/paolo/pub-docs/extended-lat-bw-throughput.pdf

[66] https://lwn.net/Articles/763603/#Comments

[67] https://lwn.net/Articles/763175/

[68] https://lwn.net/Archives/ConferenceByYear/#2018-Akademy

[69] https://dot.kde.org/2017/11/30/kdes-goals-2018-and-beyond

[70] https://phabricator.kde.org/T7116

[71] https://phabricator.kde.org/T6831

[72] https://phabricator.kde.org/T7050

[73] https://akademy.kde.org/

[74] https://community.kde.org/Promo

[75] https://www.chakralinux.org/

[76] https://conf.kde.org/en/Akademy2018/public/events/79

[77] https://en.wikipedia.org/wiki/Onboarding

[78] https://community.kde.org/Get_Involved

[79] https://community.kde.org/KDE/Junior_Jobs

[80] https://lwn.net/Articles/763189/

[81] https://phabricator.kde.org/T8686

[82] https://phabricator.kde.org/T7646

[83] https://bugs.kde.org/

[84] https://www.plasma-mobile.org/index.html

[85] https://www.plasma-mobile.org/findyourway

[86] https://lwn.net/Articles/763175/#Comments

[87] https://lwn.net/Articles/763492/

[88] https://datproject.org

[89] https://www.bittorrent.com/

[90] https://github.com/datproject/dat/releases

[91] https://docs.datproject.org/install

[92] https://datbase.org/

[93] https://ed25519.cr.yp.to/

[94] https://en.wikipedia.org/wiki/Mainline_DHT

[95] https://github.com/mafintosh/dns-discovery

[96] https://en.wikipedia.org/wiki/Magnet_URI_scheme

[97] https://blog.datproject.org/2017/10/13/using-dat-for-automatic-file-backups/

[98] https://github.com/mafintosh/hypercore-archiver

[99] https://ipfs.io/

[100] https://github.com/ipfs/go-ipfs/issues/875

[101] https://github.com/ipfs/go-ipfs/blob/master/docs/experimental-features.md#ipfs-filestore

[102] https://hashbase.io/

[103] https://github.com/datprotocol/DEPs/blob/master/proposals/0003-http-pinning-service-api.md

[104] https://docs.datproject.org/server

[105] https://lwn.net/Articles/763544/

[106] https://beakerbrowser.com/

[107] https://electronjs.org/

[108] https://github.com/beakerbrowser/explore

[109] https://addons.mozilla.org/en-US/firefox/addon/dat-p2p-protocol/

[110] https://github.com/sammacbeth/dat-fox

[111] https://github.com/sammacbeth/dat-fox-helper

[112] https://github.com/beakerbrowser/dat-photos-app

[113] https://github.com/datproject/docs/raw/master/papers/dat-paper.pdf

[114] https://github.com/datprotocol/DEPs/blob/653e0cf40233b5d474cddc04235577d9d55b2934/proposals/0000-peer-discovery.md#discovery-keys

[115] https://docs.datproject.org/security

[116] https://blog.datproject.org/2016/12/12/reader-privacy-on-the-p2p-web/

[117] https://blog.datproject.org/2017/12/10/dont-ship/

[118] https://github.com/datprotocol/DEPs/pull/7

[119] https://blog.datproject.org/2017/06/01/dat-sleep-release/

[120] https://github.com/datprotocol/DEPs

[121] https://github.com/datprotocol/DEPs/blob/master/proposals/0008-multiwriter.md

[122] https://github.com/mafintosh/hyperdb

[123] https://codeforscience.org/

[124] https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=890565

[125] https://github.com/datrs

[126] https://nodejs.org/en/

[127] https://bunsenbrowser.github.io/#!index.md

[128] https://termux.com/

[129] https://bluelinklabs.com/

[130] https://www.digital-democracy.org/

[131] https://archive.org

[132] https://blog.archive.org/2018/06/05/internet-archive-code-for-science-and-society-and-california-digital-library-to-partner-on-a-data-sharing-and-preservation-pilot-project/

[133] https://github.com/codeforscience/Dat-in-the-Lab

[134] https://www.mozilla.org/en-US/moss/

[135] https://github.com/datprotocol/DEPs/blob/master/proposals/0005-dns.md

[136] https://lwn.net/Articles/763492/#Comments

[137] https://lwn.net/Articles/763254/

[138] https://lwn.net/Articles/763255/

[139] https://lwn.net/Articles/763254/ +

+
+
+ diff --git a/test/expected/LWN/0000763603 b/test/expected/LWN/0000763603 new file mode 100644 index 0000000..82ae8c2 --- /dev/null +++ b/test/expected/LWN/0000763603 @@ -0,0 +1,13 @@ + [$] MEASURING (AND FIXING) I/O-CONTROLLER THROUGHPUT LOSS + + [Kernel] Aug 29, 2018 21:20 UTC (Wed) (corbet) + + o News link: https://lwn.net/Articles/763603 + o Source link: + + + [$] Sorry, this article is currently available to LWN + suscribers only [https://lwn.net/subscribe/]. + + + diff --git a/test/expected/LWN/0000763603.header b/test/expected/LWN/0000763603.header new file mode 100644 index 0000000..b547597 --- /dev/null +++ b/test/expected/LWN/0000763603.header @@ -0,0 +1,19 @@ +0[$] Measuring (and fixing) I/O-controller throughput loss null/LWN/0000763603 70 +i [Kernel] Aug 29, 2018 21:20 UTC (Wed) (corbet) +i +i Many services, from web hosting and video streaming to cloud +i storage, need to move data to and from storage. They also +i often require that each per-client I/O flow be guaranteed a +i non-zero amount of bandwidth and a bounded latency. An +i expensive way to provide these guarantees is to over-provision +i storage resources, keeping each resource underutilized, and +i thus have plenty of bandwidth available for the few I/O flows +i dispatched to each medium. Alternatively one can use an I/O +i controller. Linux provides two mechanisms designed to throttle +i some I/O streams to allow others to meet their bandwidth and +i latency requirements. These mechanisms work, but they come at +i a cost: a loss of as much as 80% of total available I/O +i bandwidth. I have run some tests to demonstrate this problem; +i some upcoming improvements to the bfq I/O scheduler promise to +i improve the situation considerably. +i diff --git a/test/expected/LWN/0000763603.header.html b/test/expected/LWN/0000763603.header.html new file mode 100644 index 0000000..10fb39a --- /dev/null +++ b/test/expected/LWN/0000763603.header.html @@ -0,0 +1,20 @@ + + + + + + + + +
+

[$] Measuring (and fixing) I/O-controller throughput loss

+
([Kernel] Aug 29, 2018 21:20 UTC (Wed) (corbet))
+
+
+ Many services, from web hosting and video streaming to cloud storage, need to move data to and from storage. They also often require that each per-client I/O flow be guaranteed a non-zero amount of bandwidth and a bounded latency. An expensive way to provide these guarantees is to over-provision storage resources, keeping each resource underutilized, and thus have plenty of bandwidth available for the few I/O flows dispatched to each medium. Alternatively one can use an I/O controller. Linux provides two mechanisms designed to throttle some I/O streams to allow others to meet their bandwidth and latency requirements. These mechanisms work, but they come at a cost: a loss of as much as 80% of total available I/O bandwidth. I have run some tests to demonstrate this problem; some upcoming improvements to the bfq I/O scheduler promise to improve the situation considerably. +
+
+
+ diff --git a/test/expected/LWN/0000763603.html b/test/expected/LWN/0000763603.html new file mode 100644 index 0000000..d0dfc41 --- /dev/null +++ b/test/expected/LWN/0000763603.html @@ -0,0 +1,25 @@ + + + + + + + + +
+

[$] Measuring (and fixing) I/O-controller throughput loss

+
([Kernel] Aug 29, 2018 21:20 UTC (Wed) (corbet))
+
+ +
+
+ [$] Sorry, this article is currently available to LWN suscribers only [https://lwn.net/subscribe/]. +
+
+
+ diff --git a/test/expected/LWN/0000763729 b/test/expected/LWN/0000763729 new file mode 100644 index 0000000..745ba29 --- /dev/null +++ b/test/expected/LWN/0000763729 @@ -0,0 +1,13 @@ + [$] PROTECTING FILES WITH FS-VERITY + + [Kernel] Aug 30, 2018 18:50 UTC (Thu) (corbet) + + o News link: https://lwn.net/Articles/763729 + o Source link: + + + [$] Sorry, this article is currently available to LWN + suscribers only [https://lwn.net/subscribe/]. + + + diff --git a/test/expected/LWN/0000763729.header b/test/expected/LWN/0000763729.header new file mode 100644 index 0000000..934f640 --- /dev/null +++ b/test/expected/LWN/0000763729.header @@ -0,0 +1,13 @@ +0[$] Protecting files with fs-verity null/LWN/0000763729 70 +i [Kernel] Aug 30, 2018 18:50 UTC (Thu) (corbet) +i +i The developers of the Android system have, among their many +i goals, the wish to better protect Android devices against +i persistent compromise. It is bad if a device is taken over by +i an attacker; it's worse if it remains compromised even after a +i reboot. Numerous mechanisms for ensuring the integrity of +i installed system files have been proposed and implemented over +i the years. But it seems there is always room for one more; to +i fill that space, the fs-verity mechanism is being proposed as +i a way to protect individual files from malicious modification. +i diff --git a/test/expected/LWN/0000763729.header.html b/test/expected/LWN/0000763729.header.html new file mode 100644 index 0000000..c110703 --- /dev/null +++ b/test/expected/LWN/0000763729.header.html @@ -0,0 +1,20 @@ + + + + + + + + +
+

[$] Protecting files with fs-verity

+
([Kernel] Aug 30, 2018 18:50 UTC (Thu) (corbet))
+
+
+ The developers of the Android system have, among their many goals, the wish to better protect Android devices against persistent compromise. It is bad if a device is taken over by an attacker; it's worse if it remains compromised even after a reboot. Numerous mechanisms for ensuring the integrity of installed system files have been proposed and implemented over the years. But it seems there is always room for one more; to fill that space, the fs-verity mechanism is being proposed as a way to protect individual files from malicious modification. +
+
+
+ diff --git a/test/expected/LWN/0000763729.html b/test/expected/LWN/0000763729.html new file mode 100644 index 0000000..01ceedf --- /dev/null +++ b/test/expected/LWN/0000763729.html @@ -0,0 +1,25 @@ + + + + + + + + +
+

[$] Protecting files with fs-verity

+
([Kernel] Aug 30, 2018 18:50 UTC (Thu) (corbet))
+
+ +
+
+ [$] Sorry, this article is currently available to LWN suscribers only [https://lwn.net/subscribe/]. +
+
+
+ diff --git a/test/expected/LWN/0000763789 b/test/expected/LWN/0000763789 new file mode 100644 index 0000000..4a05b04 --- /dev/null +++ b/test/expected/LWN/0000763789 @@ -0,0 +1,13 @@ + [$] LWN.NET WEEKLY EDITION FOR SEPTEMBER 6, 2018 + + + + o News link: https://lwn.net/Articles/763789/ + o Source link: + + + [$] Sorry, this article is currently available to LWN + suscribers only [https://lwn.net/subscribe/]. + + + diff --git a/test/expected/LWN/0000763789.header b/test/expected/LWN/0000763789.header new file mode 100644 index 0000000..9bb9525 --- /dev/null +++ b/test/expected/LWN/0000763789.header @@ -0,0 +1,3 @@ +0[$] LWN.net Weekly Edition for September 6, 2018 null/LWN/0000763789 70 +i +i diff --git a/test/expected/LWN/0000763789.header.html b/test/expected/LWN/0000763789.header.html new file mode 100644 index 0000000..ad726d4 --- /dev/null +++ b/test/expected/LWN/0000763789.header.html @@ -0,0 +1,20 @@ + + + + + + + + +
+

[$] LWN.net Weekly Edition for September 6, 2018

+
+
+
+ +
+
+
+ diff --git a/test/expected/LWN/0000763789.html b/test/expected/LWN/0000763789.html new file mode 100644 index 0000000..0638c5f --- /dev/null +++ b/test/expected/LWN/0000763789.html @@ -0,0 +1,25 @@ + + + + + + + + +
+

[$] LWN.net Weekly Edition for September 6, 2018

+
+
+ +
+
+ [$] Sorry, this article is currently available to LWN suscribers only [https://lwn.net/subscribe/]. +
+
+
+ diff --git a/test/expected/LWN/0000763987 b/test/expected/LWN/0000763987 new file mode 100644 index 0000000..aa76279 --- /dev/null +++ b/test/expected/LWN/0000763987 @@ -0,0 +1,19 @@ + KERNEL PREPATCH 4.19-RC2 + + [Kernel] Sep 2, 2018 22:29 UTC (Sun) (corbet) + + o News link: https://lwn.net/Articles/763987/ + o Source link: + + + The [1]4.19-rc2 kernel prepatch is out for testing. " As + usual, the rc2 release is pretty small. People are taking a + breather after the merge window, and it takes a bit of time + for bug reports to start coming in and get identified. " + + + + [1] https://lwn.net/Articles/763988/ + + + diff --git a/test/expected/LWN/0000763987.header b/test/expected/LWN/0000763987.header new file mode 100644 index 0000000..8fc90d4 --- /dev/null +++ b/test/expected/LWN/0000763987.header @@ -0,0 +1,8 @@ +0Kernel prepatch 4.19-rc2 null/LWN/0000763987 70 +i [Kernel] Sep 2, 2018 22:29 UTC (Sun) (corbet) +i +i The 4.19-rc2 kernel prepatch is out for testing. "As usual, +i the rc2 release is pretty small. People are taking a breather +i after the merge window, and it takes a bit of time for bug +i reports to start coming in and get identified." +i diff --git a/test/expected/LWN/0000763987.header.html b/test/expected/LWN/0000763987.header.html new file mode 100644 index 0000000..76f8132 --- /dev/null +++ b/test/expected/LWN/0000763987.header.html @@ -0,0 +1,20 @@ + + + + + + + + +
+

Kernel prepatch 4.19-rc2

+
([Kernel] Sep 2, 2018 22:29 UTC (Sun) (corbet))
+
+
+ The 4.19-rc2 kernel prepatch is out for testing. "As usual, the rc2 release is pretty small. People are taking a breather after the merge window, and it takes a bit of time for bug reports to start coming in and get identified." +
+
+
+ diff --git a/test/expected/LWN/0000763987.html b/test/expected/LWN/0000763987.html new file mode 100644 index 0000000..e73655b --- /dev/null +++ b/test/expected/LWN/0000763987.html @@ -0,0 +1,25 @@ + + + + + + + + +
+

Kernel prepatch 4.19-rc2

+
([Kernel] Sep 2, 2018 22:29 UTC (Sun) (corbet))
+
+ +
+
+ The [1]4.19-rc2 kernel prepatch is out for testing. " As usual, the rc2 release is pretty small. People are taking a breather after the merge window, and it takes a bit of time for bug reports to start coming in and get identified. "



[1] https://lwn.net/Articles/763988/ +
+
+
+ diff --git a/test/expected/LWN/0000764001 b/test/expected/LWN/0000764001 new file mode 100644 index 0000000..f0c680c --- /dev/null +++ b/test/expected/LWN/0000764001 @@ -0,0 +1,13 @@ + [$] AN INTRODUCTION TO THE JULIA LANGUAGE, PART 2 + + [Development] Sep 4, 2018 15:57 UTC (Tue) (jake) + + o News link: https://lwn.net/Articles/764001 + o Source link: + + + [$] Sorry, this article is currently available to LWN + suscribers only [https://lwn.net/subscribe/]. + + + diff --git a/test/expected/LWN/0000764001.header b/test/expected/LWN/0000764001.header new file mode 100644 index 0000000..529c9fa --- /dev/null +++ b/test/expected/LWN/0000764001.header @@ -0,0 +1,12 @@ +0[$] An introduction to the Julia language, part 2 null/LWN/0000764001 70 +i [Development] Sep 4, 2018 15:57 UTC (Tue) (jake) +i +i Part 1 of this series introduced the Julia project's goals and +i development process, along with the language syntax, including +i the basics of control flow, data types, and, in more detail, +i how to work with arrays. In this part, user-defined functions +i and the central concept of multiple dispatch are described. It +i will also survey Julia's module and package system, cover some +i syntax features, show how to make plots, and briefly dip into +i macros and distributed computing. +i diff --git a/test/expected/LWN/0000764001.header.html b/test/expected/LWN/0000764001.header.html new file mode 100644 index 0000000..8611c17 --- /dev/null +++ b/test/expected/LWN/0000764001.header.html @@ -0,0 +1,20 @@ + + + + + + + + +
+

[$] An introduction to the Julia language, part 2

+
([Development] Sep 4, 2018 15:57 UTC (Tue) (jake))
+
+
+ Part 1 of this series introduced the Julia project's goals and development process, along with the language syntax, including the basics of control flow, data types, and, in more detail, how to work with arrays. In this part, user-defined functions and the central concept of multiple dispatch are described. It will also survey Julia's module and package system, cover some syntax features, show how to make plots, and briefly dip into macros and distributed computing. +
+
+
+ diff --git a/test/expected/LWN/0000764001.html b/test/expected/LWN/0000764001.html new file mode 100644 index 0000000..cbf08fd --- /dev/null +++ b/test/expected/LWN/0000764001.html @@ -0,0 +1,25 @@ + + + + + + + + +
+

[$] An introduction to the Julia language, part 2

+
([Development] Sep 4, 2018 15:57 UTC (Tue) (jake))
+
+ +
+
+ [$] Sorry, this article is currently available to LWN suscribers only [https://lwn.net/subscribe/]. +
+
+
+ diff --git a/test/expected/LWN/0000764046 b/test/expected/LWN/0000764046 new file mode 100644 index 0000000..03dc5ce --- /dev/null +++ b/test/expected/LWN/0000764046 @@ -0,0 +1,490 @@ + SECURITY UPDATES FOR MONDAY + + [Security] Sep 3, 2018 15:41 UTC (Mon) (ris) + + o News link: https://lwn.net/Articles/764046 + o Source link: + + + Dist. + + ID + + Release + + Package + + Date + + Debian + + [1]DLA-1492-1 + + LTS + + dojo + + 2018-09-03 + + Debian + + [2]DLA-1487-1 + + LTS + + libtirpc + + 2018-08-31 + + Debian + + [3]DLA-1488-1 + + LTS + + mariadb-10.0 + + 2018-08-31 + + Debian + + [4]DLA-1490-1 + + LTS + + php5 + + 2018-09-01 + + Debian + + [5]DSA-4283-1 + + stable + + ruby-json-jwt + + 2018-08-31 + + Debian + + [6]DLA-1488-1 + + LTS + + spice + + 2018-08-31 + + Debian + + [7]DLA-1486-1 + + LTS + + spice + + 2018-09-01 + + Debian + + [8]DLA-1489-1 + + LTS + + spice-gtk + + 2018-09-01 + + Debian + + [9]DLA-1491-1 + + LTS + + tomcat8 + + 2018-09-02 + + Debian + + [10]DSA-4282-1 + + stable + + trafficserver + + 2018-08-31 + + Fedora + + [11]FEDORA-2018-33fef25ed1 + + F28 + + ghc-hakyll + + 2018-08-31 + + Fedora + + [12]FEDORA-2018-33fef25ed1 + + F28 + + ghc-hs-bibutils + + 2018-08-31 + + Fedora + + [13]FEDORA-2018-07083800ac + + F28 + + ghostscript + + 2018-09-02 + + Fedora + + [14]FEDORA-2018-77e610115a + + F28 + + mariadb + + 2018-08-31 + + Fedora + + [15]FEDORA-2018-33fef25ed1 + + F28 + + pandoc-citeproc + + 2018-08-31 + + Fedora + + [16]FEDORA-2018-f2b24ce26e + + F28 + + phpMyAdmin + + 2018-08-31 + + Fedora + + [17]FEDORA-2018-915602df63 + + F27 + + xen + + 2018-08-31 + + Mageia + + [18]MGASA-2018-0366 + + 6 + + java-1.8.0-openjdk + + 2018-09-02 + + Mageia + + [19]MGASA-2018-0361 + + 6 + + libarchive + + 2018-08-31 + + Mageia + + [20]MGASA-2018-0367 + + 6 + + libgd + + 2018-09-02 + + Mageia + + [21]MGASA-2018-0356 + + 6 + + libraw + + 2018-08-31 + + Mageia + + [22]MGASA-2018-0364 + + 6 + + libxcursor + + 2018-08-31 + + Mageia + + [23]MGASA-2018-0359 + + 5 + + mariadb + + 2018-08-31 + + Mageia + + [24]MGASA-2018-0355 + + 5, 6 + + mercurial + + 2018-08-31 + + Mageia + + [25]MGASA-2018-0363 + + 6 + + openssh + + 2018-08-31 + + Mageia + + [26]MGASA-2018-0365 + + 6 + + openssl + + 2018-09-02 + + Mageia + + [27]MGASA-2018-0358 + + 6 + + poppler + + 2018-08-31 + + Mageia + + [28]MGASA-2018-0362 + + 6 + + quazip + + 2018-08-31 + + Mageia + + [29]MGASA-2018-0357 + + 6 + + squirrelmail + + 2018-08-31 + + Mageia + + [30]MGASA-2018-0360 + + 6 + + virtualbox + + 2018-08-31 + + openSUSE + + [31]openSUSE-SU-2018:2590-1 + + 42.3 + + cobbler + + 2018-09-03 + + openSUSE + + [32]openSUSE-SU-2018:2592-1 + + 15.0 + + libressl + + 2018-09-03 + + openSUSE + + [33]openSUSE-SU-2018:2587-1 + + 42.3 + + wireshark + + 2018-09-02 + + openSUSE + + [34]openSUSE-SU-2018:2591-1 + + 15.0 42.3 + + zutils + + 2018-09-03 + + SUSE + + [35]SUSE-SU-2018:2576-1 + + OS7 + + OpenStack + + 2018-08-31 + + SUSE + + [36]SUSE-SU-2018:2578-1 + + OS7 + + couchdb + + 2018-08-31 + + SUSE + + [37]SUSE-SU-2018:2574-1 + + SLE11 + + java-1_7_0-ibm + + 2018-08-31 + + SUSE + + [38]SUSE-SU-2018:2583-1 + + SLE11 + + java-1_7_1-ibm + + 2018-08-31 + + SUSE + + [39]SUSE-SU-2018:2584-1 + + SLE12 + + spice + + 2018-08-31 + + + + [1] https://lwn.net/Articles/764007/ + + [2] https://lwn.net/Articles/764008/ + + [3] https://lwn.net/Articles/764009/ + + [4] https://lwn.net/Articles/764010/ + + [5] https://lwn.net/Articles/764011/ + + [6] https://lwn.net/Articles/764013/ + + [7] https://lwn.net/Articles/764012/ + + [8] https://lwn.net/Articles/764014/ 
+ + [9] https://lwn.net/Articles/764015/ + + [10] https://lwn.net/Articles/764016/ + + [11] https://lwn.net/Articles/764017/ + + [12] https://lwn.net/Articles/764018/ + + [13] https://lwn.net/Articles/764019/ + + [14] https://lwn.net/Articles/764020/ + + [15] https://lwn.net/Articles/764021/ + + [16] https://lwn.net/Articles/764022/ + + [17] https://lwn.net/Articles/764023/ + + [18] https://lwn.net/Articles/764024/ + + [19] https://lwn.net/Articles/764025/ + + [20] https://lwn.net/Articles/764026/ + + [21] https://lwn.net/Articles/764027/ + + [22] https://lwn.net/Articles/764028/ + + [23] https://lwn.net/Articles/764029/ + + [24] https://lwn.net/Articles/764030/ + + [25] https://lwn.net/Articles/764031/ + + [26] https://lwn.net/Articles/764032/ + + [27] https://lwn.net/Articles/764033/ + + [28] https://lwn.net/Articles/764034/ + + [29] https://lwn.net/Articles/764035/ + + [30] https://lwn.net/Articles/764036/ + + [31] https://lwn.net/Articles/764037/ + + [32] https://lwn.net/Articles/764038/ + + [33] https://lwn.net/Articles/764039/ + + [34] https://lwn.net/Articles/764040/ + + [35] https://lwn.net/Articles/764044/ + + [36] https://lwn.net/Articles/764041/ + + [37] https://lwn.net/Articles/764042/ + + [38] https://lwn.net/Articles/764043/ + + [39] https://lwn.net/Articles/764045/ + + + diff --git a/test/expected/LWN/0000764046.header b/test/expected/LWN/0000764046.header new file mode 100644 index 0000000..095afeb --- /dev/null +++ b/test/expected/LWN/0000764046.header @@ -0,0 +1,13 @@ +0Security updates for Monday null/LWN/0000764046 70 +i [Security] Sep 3, 2018 15:41 UTC (Mon) (ris) +i +i Security updates have been issued by Debian (dojo, libtirpc, +i mariadb-10.0, php5, ruby-json-jwt, spice, spice-gtk, tomcat8, +i and trafficserver), Fedora (ghc-hakyll, ghc-hs-bibutils, +i ghostscript, mariadb, pandoc-citeproc, phpMyAdmin, and xen), +i Mageia (java-1.8.0-openjdk, libarchive, libgd, libraw, +i libxcursor, mariadb, mercurial, openssh, openssl, poppler, +i quazip, squirrelmail, and virtualbox), openSUSE (cobbler, +i libressl, wireshark, and zutils), and SUSE (couchdb, +i java-1_7_0-ibm, java-1_7_1-ibm, OpenStack, and spice). +i diff --git a/test/expected/LWN/0000764046.header.html b/test/expected/LWN/0000764046.header.html new file mode 100644 index 0000000..d42349b --- /dev/null +++ b/test/expected/LWN/0000764046.header.html @@ -0,0 +1,20 @@ + + + + + + + + +
+

Security updates for Monday

+
([Security] Sep 3, 2018 15:41 UTC (Mon) (ris))
+
+
+ Security updates have been issued by Debian (dojo, libtirpc, mariadb-10.0, php5, ruby-json-jwt, spice, spice-gtk, tomcat8, and trafficserver), Fedora (ghc-hakyll, ghc-hs-bibutils, ghostscript, mariadb, pandoc-citeproc, phpMyAdmin, and xen), Mageia (java-1.8.0-openjdk, libarchive, libgd, libraw, libxcursor, mariadb, mercurial, openssh, openssl, poppler, quazip, squirrelmail, and virtualbox), openSUSE (cobbler, libressl, wireshark, and zutils), and SUSE (couchdb, java-1_7_0-ibm, java-1_7_1-ibm, OpenStack, and spice). +
+
+
+ diff --git a/test/expected/LWN/0000764046.html b/test/expected/LWN/0000764046.html new file mode 100644 index 0000000..4384209 --- /dev/null +++ b/test/expected/LWN/0000764046.html @@ -0,0 +1,25 @@ + + + + + + + + +
+

Security updates for Monday

+
([Security] Sep 3, 2018 15:41 UTC (Mon) (ris))
+
+ +
+
+ Dist.

ID

Release

Package

Date

Debian

[1]DLA-1492-1

LTS

dojo

2018-09-03

Debian

[2]DLA-1487-1

LTS

libtirpc

2018-08-31

Debian

[3]DLA-1488-1

LTS

mariadb-10.0

2018-08-31

Debian

[4]DLA-1490-1

LTS

php5

2018-09-01

Debian

[5]DSA-4283-1

stable

ruby-json-jwt

2018-08-31

Debian

[6]DLA-1488-1

LTS

spice

2018-08-31

Debian

[7]DLA-1486-1

LTS

spice

2018-09-01

Debian

[8]DLA-1489-1

LTS

spice-gtk

2018-09-01

Debian

[9]DLA-1491-1

LTS

tomcat8

2018-09-02

Debian

[10]DSA-4282-1

stable

trafficserver

2018-08-31

Fedora

[11]FEDORA-2018-33fef25ed1

F28

ghc-hakyll

2018-08-31

Fedora

[12]FEDORA-2018-33fef25ed1

F28

ghc-hs-bibutils

2018-08-31

Fedora

[13]FEDORA-2018-07083800ac

F28

ghostscript

2018-09-02

Fedora

[14]FEDORA-2018-77e610115a

F28

mariadb

2018-08-31

Fedora

[15]FEDORA-2018-33fef25ed1

F28

pandoc-citeproc

2018-08-31

Fedora

[16]FEDORA-2018-f2b24ce26e

F28

phpMyAdmin

2018-08-31

Fedora

[17]FEDORA-2018-915602df63

F27

xen

2018-08-31

Mageia

[18]MGASA-2018-0366

6

java-1.8.0-openjdk

2018-09-02

Mageia

[19]MGASA-2018-0361

6

libarchive

2018-08-31

Mageia

[20]MGASA-2018-0367

6

libgd

2018-09-02

Mageia

[21]MGASA-2018-0356

6

libraw

2018-08-31

Mageia

[22]MGASA-2018-0364

6

libxcursor

2018-08-31

Mageia

[23]MGASA-2018-0359

5

mariadb

2018-08-31

Mageia

[24]MGASA-2018-0355

5, 6

mercurial

2018-08-31

Mageia

[25]MGASA-2018-0363

6

openssh

2018-08-31

Mageia

[26]MGASA-2018-0365

6

openssl

2018-09-02

Mageia

[27]MGASA-2018-0358

6

poppler

2018-08-31

Mageia

[28]MGASA-2018-0362

6

quazip

2018-08-31

Mageia

[29]MGASA-2018-0357

6

squirrelmail

2018-08-31

Mageia

[30]MGASA-2018-0360

6

virtualbox

2018-08-31

openSUSE

[31]openSUSE-SU-2018:2590-1

42.3

cobbler

2018-09-03

openSUSE

[32]openSUSE-SU-2018:2592-1

15.0

libressl

2018-09-03

openSUSE

[33]openSUSE-SU-2018:2587-1

42.3

wireshark

2018-09-02

openSUSE

[34]openSUSE-SU-2018:2591-1

15.0 42.3

zutils

2018-09-03

SUSE

[35]SUSE-SU-2018:2576-1

OS7

OpenStack

2018-08-31

SUSE

[36]SUSE-SU-2018:2578-1

OS7

couchdb

2018-08-31

SUSE

[37]SUSE-SU-2018:2574-1

SLE11

java-1_7_0-ibm

2018-08-31

SUSE

[38]SUSE-SU-2018:2583-1

SLE11

java-1_7_1-ibm

2018-08-31

SUSE

[39]SUSE-SU-2018:2584-1

SLE12

spice

2018-08-31



[1] https://lwn.net/Articles/764007/

[2] https://lwn.net/Articles/764008/

[3] https://lwn.net/Articles/764009/

[4] https://lwn.net/Articles/764010/

[5] https://lwn.net/Articles/764011/

[6] https://lwn.net/Articles/764013/

[7] https://lwn.net/Articles/764012/

[8] https://lwn.net/Articles/764014/

[9] https://lwn.net/Articles/764015/

[10] https://lwn.net/Articles/764016/

[11] https://lwn.net/Articles/764017/

[12] https://lwn.net/Articles/764018/

[13] https://lwn.net/Articles/764019/

[14] https://lwn.net/Articles/764020/

[15] https://lwn.net/Articles/764021/

[16] https://lwn.net/Articles/764022/

[17] https://lwn.net/Articles/764023/

[18] https://lwn.net/Articles/764024/

[19] https://lwn.net/Articles/764025/

[20] https://lwn.net/Articles/764026/

[21] https://lwn.net/Articles/764027/

[22] https://lwn.net/Articles/764028/

[23] https://lwn.net/Articles/764029/

[24] https://lwn.net/Articles/764030/

[25] https://lwn.net/Articles/764031/

[26] https://lwn.net/Articles/764032/

[27] https://lwn.net/Articles/764033/

[28] https://lwn.net/Articles/764034/

[29] https://lwn.net/Articles/764035/

[30] https://lwn.net/Articles/764036/

[31] https://lwn.net/Articles/764037/

[32] https://lwn.net/Articles/764038/

[33] https://lwn.net/Articles/764039/

[34] https://lwn.net/Articles/764040/

[35] https://lwn.net/Articles/764044/

[36] https://lwn.net/Articles/764041/

[37] https://lwn.net/Articles/764042/

[38] https://lwn.net/Articles/764043/

[39] https://lwn.net/Articles/764045/ +
+
+
+ diff --git a/test/expected/LWN/0000764048 b/test/expected/LWN/0000764048 new file mode 100644 index 0000000..2e2225d --- /dev/null +++ b/test/expected/LWN/0000764048 @@ -0,0 +1,13 @@ + [$] LIFE BEHIND THE TINFOIL CURTAIN + + [Security] Sep 5, 2018 22:11 UTC (Wed) (jake) + + o News link: https://lwn.net/Articles/764048 + o Source link: + + + [$] Sorry, this article is currently available to LWN + suscribers only [https://lwn.net/subscribe/]. + + + diff --git a/test/expected/LWN/0000764048.header b/test/expected/LWN/0000764048.header new file mode 100644 index 0000000..c44db41 --- /dev/null +++ b/test/expected/LWN/0000764048.header @@ -0,0 +1,13 @@ +0[$] Life behind the tinfoil curtain null/LWN/0000764048 70 +i [Security] Sep 5, 2018 22:11 UTC (Wed) (jake) +i +i Security and convenience rarely go hand-in-hand, but if your +i job (or life) requires extraordinary care against potentially +i targeted attacks, the security side of that tradeoff may win +i out. If so, running a system like Qubes OS on your desktop or +i CopperheadOS on your phone might make sense, which is just +i what Konstantin Ryabitsev, Linux Foundation (LF) director of +i IT security, has done. He reported on the experience in a talk +i [YouTube video] entitled "Life Behind the Tinfoil Curtain" at +i the 2018 Linux Security Summit North America. +i diff --git a/test/expected/LWN/0000764048.header.html b/test/expected/LWN/0000764048.header.html new file mode 100644 index 0000000..4c8a08e --- /dev/null +++ b/test/expected/LWN/0000764048.header.html @@ -0,0 +1,20 @@ + + + + + + + + +
+

[$] Life behind the tinfoil curtain

+
([Security] Sep 5, 2018 22:11 UTC (Wed) (jake))
+
+
+ Security and convenience rarely go hand-in-hand, but if your job (or life) requires extraordinary care against potentially targeted attacks, the security side of that tradeoff may win out. If so, running a system like Qubes OS on your desktop or CopperheadOS on your phone might make sense, which is just what Konstantin Ryabitsev, Linux Foundation (LF) director of IT security, has done. He reported on the experience in a talk [YouTube video] entitled "Life Behind the Tinfoil Curtain" at the 2018 Linux Security Summit North America. +
+
+
+ diff --git a/test/expected/LWN/0000764048.html b/test/expected/LWN/0000764048.html new file mode 100644 index 0000000..8675aee --- /dev/null +++ b/test/expected/LWN/0000764048.html @@ -0,0 +1,25 @@ + + + + + + + + +
+

[$] Life behind the tinfoil curtain

+
([Security] Sep 5, 2018 22:11 UTC (Wed) (jake))
+
+ +
+
+ [$] Sorry, this article is currently available to LWN suscribers only [https://lwn.net/subscribe/]. +
+
+
+ diff --git a/test/expected/LWN/0000764055 b/test/expected/LWN/0000764055 new file mode 100644 index 0000000..5136596 --- /dev/null +++ b/test/expected/LWN/0000764055 @@ -0,0 +1,239 @@ + TOPICS SOUGHT FOR THE KERNEL AND MAINTAINER SUMMITS + + [Kernel] Sep 3, 2018 19:07 UTC (Mon) (corbet) + + o News link: https://lwn.net/Articles/764055 + o Source link: + + + The annual Maintainer and Kernel Summits will be held in + Vancouver, BC on November 12 to 15, in conjunction with the + Linux Plumbers Conference. The program committee is looking + for topics for both summits; read on for details on how to + submit ideas and, perhaps, get an invitation to the Maintainer + Summit. + + From : + + "Theodore Y. Ts'o" + + To : + + linux-kernel-AT-vger.kernel.org, linux-fsdevel-AT-vger.kernel.- + org, linux-mm-AT-kvack.org, netdev-AT-vger.kernel.org, + linux-block-AT-vger.kernel.org + + Subject : + + Maintainer / Kernel Summit 2018 planning kick-off + + Date : + + Thu, 30 Aug 2018 17:35:17 -0400 + + Message-ID : + + <20180830213517.GA19110@thunk.org> + + Archive-link : + + [1]Article + + [ Feel free to forward this to other Linux kernel mailing + lists as + + appropriate -- Ted ] + + This year, the Maintainer and Kernel Summit will be in + Vancouver, + + B.C., November 12th -- 15th. The Maintainer's summit will be + held on + + Monday, November 12th, in Vancouver, immediately before the + Linux + + Plumber's Conference (LPC) November 13th -- 15th. + + For the past few years, before 2017, we've scheduled mostly + management + + and development process issues on the first day. We then + opened up + + the second day of the Kernel Summit to all attendees of the + conference + + with which the Kernel Summit has been colocated, and called it + the + + "Open Technical Day". This is something that just made sense + in order + + to assure that all of the necessary people needed to discuss a + + particular technical issue could be in the room. + + Starting last year in Prague, we took the next logical step, + and split + + the Kernel Summit in two. The "Maintainer's Summit" is an + + invite-only, half-day event, where the primary focus will be + process + + issues of Linux Kernel Development. It will be limited to 30 + invitees + + and a handful of sponsored attendees. This makes it smaller + than the + + first few kernel summits (which were limited to around 50 + attendees). + + The "Kernel Summit" is now organized as a track which is run in + + parallel with the other tracks at the Linux Plumber's + Conference, and + + is open to all registered attendees of Plumbers. Much as how we + + organized the Kernel Summit "open technical day" in 2016 in + Santa Fe, + + the Kernel Summit schedule will be synchronized with the other + tracks + + at the Plumber's Conference, and it will be open to all + registered + + Plumber's attendees. + + Linus has suggested the following ten people as the core of + the people + + he would like invited to the Maintainer's Summit, which was + calculated + + from statistics from his git tree. + + David Miller + + Dave Airlie + + Greg KH + + Arnd Bergmann + + Ingo Molnar + + Mauro Carvalho Chehab + + Takashi Iwai + + Thomas Gleixner + + Andrew Morton + + Olof Johansson + + As we did last year, there will be a mini-program committee + that will + + be pick enough names to bring the total number of 30 for the + + Maintainer's Summit. That program committee will consist of + Arnd + + Bergmann, Thomas Gleixner, Greg KH, Paul McKenney, and Ted + Ts'o. 
+ + We will use the rest of names on the list generated by Linus's + script + + as a starting point of people to be considered. People who + suggest + + topics that should be discussed on the Maintainer's summit + will also + + be added to the list. To make topic suggestions for the + Maintainer's + + Summit, please send e-mail to the ksummit-discuss list with a + subject + + prefix of [MAINTAINERS SUMMIT]. + + The other job of the program committee will be to organize the + program + + for the Kernel Summit. The goal of the Kernel Summit track + will be to + + provide a forum to discuss specific technical issues that + would be + + easier to resolve in person than over e-mail. The program + committee + + will also consider "information sharing" topics if they are + clearly of + + interest to the wider development community (i.e., advanced + training + + in topics that would be useful to kernel developers). + + To suggest a topic for the Kernel Summit, please tag your + e-mail with + + [TECH TOPIC]. As before, please use a separate e-mail for each + topic, + + and send the topic suggestions to: + + ksummit-discuss@lists.linuxfoundation.org + + People who submit topic suggestions before September 21st and + which + + are accepted, will be given a free admission to the Linux + Plumbers + + Conference. + + We will reserving roughly half the Kernel Summit slots for + last-minute + + discussions that will be scheduled during the week of + Plumber's, in an + + "unconference style". This was extremely popular in Santa Fe + and in + + Prague, since it allowed ideas that came up in hallway + discussions, + + and in Plumber's Miniconference, to be given scheduled, + dedicated + + times for that discussion. + + If you were not subscribed on to the kernel-discuss mailing + list from + + last year (or if you had removed yourself after the kernel + summit), + + you can subscribe to the discuss list using mailman: + + https://lists.linuxfoundation.org/mailman/listinfo/ksummi... + + + + [1] https://lwn.net/ml/linux-kernel/20180830213517.GA19110@thu- + nk.org + + + diff --git a/test/expected/LWN/0000764055.header b/test/expected/LWN/0000764055.header new file mode 100644 index 0000000..0b2a9b6 --- /dev/null +++ b/test/expected/LWN/0000764055.header @@ -0,0 +1,10 @@ +0Topics sought for the Kernel and Maintainer Summits null/LWN/0000764055 70 +i [Kernel] Sep 3, 2018 19:07 UTC (Mon) (corbet) +i +i The annual Maintainer and Kernel Summits will be held in +i Vancouver, BC on November 12 to 15, in conjunction with the +i Linux Plumbers Conference. The program committee is looking +i for topics for both summits; read on for details on how to +i submit ideas and, perhaps, get an invitation to the Maintainer +i Summit. +i diff --git a/test/expected/LWN/0000764055.header.html b/test/expected/LWN/0000764055.header.html new file mode 100644 index 0000000..359a99c --- /dev/null +++ b/test/expected/LWN/0000764055.header.html @@ -0,0 +1,20 @@ + + + + + + + + +
+

Topics sought for the Kernel and Maintainer Summits

+
([Kernel] Sep 3, 2018 19:07 UTC (Mon) (corbet))
+
+
+ The annual Maintainer and Kernel Summits will be held in Vancouver, BC on November 12 to 15, in conjunction with the Linux Plumbers Conference. The program committee is looking for topics for both summits; read on for details on how to submit ideas and, perhaps, get an invitation to the Maintainer Summit. +
+
+
+ diff --git a/test/expected/LWN/0000764055.html b/test/expected/LWN/0000764055.html new file mode 100644 index 0000000..14008c9 --- /dev/null +++ b/test/expected/LWN/0000764055.html @@ -0,0 +1,25 @@ + + + + + + + + +
+

Topics sought for the Kernel and Maintainer Summits

+
([Kernel] Sep 3, 2018 19:07 UTC (Mon) (corbet))
+
+ +
+
+ The annual Maintainer and Kernel Summits will be held in Vancouver, BC on November 12 to 15, in conjunction with the Linux Plumbers Conference. The program committee is looking for topics for both summits; read on for details on how to submit ideas and, perhaps, get an invitation to the Maintainer Summit.

From :

"Theodore Y. Ts'o" <tytso-AT-mit.edu>

To :

linux-kernel-AT-vger.kernel.org, linux-fsdevel-AT-vger.kernel.org, linux-mm-AT-kvack.org, netdev-AT-vger.kernel.org, linux-block-AT-vger.kernel.org

Subject :

Maintainer / Kernel Summit 2018 planning kick-off

Date :

Thu, 30 Aug 2018 17:35:17 -0400

Message-ID :

<20180830213517.GA19110@thunk.org>

Archive-link :

[1]Article

Feel free to forward this to other Linux kernel mailing lists as

appropriate -- Ted



This year, the Maintainer and Kernel Summit will be in Vancouver,

B.C., November 12th -- 15th. The Maintainer's summit will be held on

Monday, November 12th, in Vancouver, immediately before the Linux

Plumber's Conference (LPC) November 13th -- 15th.

For the past few years, before 2017, we've scheduled mostly management

and development process issues on the first day. We then opened up

the second day of the Kernel Summit to all attendees of the conference

with which the Kernel Summit has been colocated, and called it the

"Open Technical Day". This is something that just made sense in order

to assure that all of the necessary people needed to discuss a

particular technical issue could be in the room.

Starting last year in Prague, we took the next logical step, and split

the Kernel Summit in two. The "Maintainer's Summit" is an

invite-only, half-day event, where the primary focus will be process

issues of Linux Kernel Development. It will be limited to 30 invitees

and a handful of sponsored attendees. This makes it smaller than the

first few kernel summits (which were limited to around 50 attendees).

The "Kernel Summit" is now organized as a track which is run in

parallel with the other tracks at the Linux Plumber's Conference, and

is open to all registered attendees of Plumbers. Much as how we

organized the Kernel Summit "open technical day" in 2016 in Santa Fe,

the Kernel Summit schedule will be synchronized with the other tracks

at the Plumber's Conference, and it will be open to all registered

Plumber's attendees.

Linus has suggested the following ten people as the core of the people

he would like invited to the Maintainer's Summit, which was calculated

from statistics from his git tree.

David Miller

Dave Airlie

Greg KH

Arnd Bergmann

Ingo Molnar

Mauro Carvalho Chehab

Takashi Iwai

Thomas Gleixner

Andrew Morton

Olof Johansson

As we did last year, there will be a mini-program committee that will

be pick enough names to bring the total number of 30 for the

Maintainer's Summit. That program committee will consist of Arnd

Bergmann, Thomas Gleixner, Greg KH, Paul McKenney, and Ted Ts'o.

We will use the rest of names on the list generated by Linus's script

as a starting point of people to be considered. People who suggest

topics that should be discussed on the Maintainer's summit will also

be added to the list. To make topic suggestions for the Maintainer's

Summit, please send e-mail to the ksummit-discuss list with a subject

prefix of [MAINTAINERS SUMMIT].

The other job of the program committee will be to organize the program

for the Kernel Summit. The goal of the Kernel Summit track will be to

provide a forum to discuss specific technical issues that would be

easier to resolve in person than over e-mail. The program committee

will also consider "information sharing" topics if they are clearly of

interest to the wider development community (i.e., advanced training

in topics that would be useful to kernel developers).

To suggest a topic for the Kernel Summit, please tag your e-mail with

[TECH TOPIC]. As before, please use a separate e-mail for each topic,

and send the topic suggestions to:

ksummit-discuss@lists.linuxfoundation.org

People who submit topic suggestions before September 21st and which

are accepted, will be given a free admission to the Linux Plumbers

Conference.

We will reserving roughly half the Kernel Summit slots for last-minute

discussions that will be scheduled during the week of Plumber's, in an

"unconference style". This was extremely popular in Santa Fe and in

Prague, since it allowed ideas that came up in hallway discussions,

and in Plumber's Miniconference, to be given scheduled, dedicated

times for that discussion.

If you were not subscribed on to the kernel-discuss mailing list from

last year (or if you had removed yourself after the kernel summit),

you can subscribe to the discuss list using mailman:

https://lists.linuxfoundation.org/mailman/listinfo/ksummi...



[1] https://lwn.net/ml/linux-kernel/20180830213517.GA19110@thunk.org +
+
+
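
As a concrete illustration of the submission procedure described in the message above, a topic proposal could be a plain e-mail along these lines (the address and the subject tag come from the message itself; the placeholder text is made up):

    To: ksummit-discuss@lists.linuxfoundation.org
    Subject: [TECH TOPIC] <one-line summary of the proposed topic>

    <A short description of the technical issue and why it would be
     easier to resolve in person than over e-mail.>

A [MAINTAINERS SUMMIT] prefix would be used instead for topics aimed at the invite-only Maintainer's Summit, as the message explains.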
+ diff --git a/test/expected/LWN/0000764057 b/test/expected/LWN/0000764057 new file mode 100644 index 0000000..58a24b1 --- /dev/null +++ b/test/expected/LWN/0000764057 @@ -0,0 +1,13 @@ + [$] IDA: SIMPLIFYING THE COMPLEX TASK OF ALLOCATING INTEGERS + + [Kernel] Sep 4, 2018 0:15 UTC (Tue) (corbet) + + o News link: https://lwn.net/Articles/764057 + o Source link: + + + [$] Sorry, this article is currently available to LWN + suscribers only [https://lwn.net/subscribe/]. + + + diff --git a/test/expected/LWN/0000764057.header b/test/expected/LWN/0000764057.header new file mode 100644 index 0000000..eb103f9 --- /dev/null +++ b/test/expected/LWN/0000764057.header @@ -0,0 +1,16 @@ +0[$] IDA: simplifying the complex task of allocating integers null/LWN/0000764057 70 +i [Kernel] Sep 4, 2018 0:15 UTC (Tue) (corbet) +i +i It is common for kernel code to generate unique integers for +i identifiers. When one plugs in a flash drive, it will show up +i as /dev/sdN; that N (a letter derived from a number) must be +i generated in the kernel, and it should not already be in use +i for another drive or unpleasant things will happen. One might +i think that generating such numbers would not be a difficult +i task, but that turns out not to be the case, especially in +i situations where many numbers must be tracked. The IDA (for +i "ID allocator", perhaps) API exists to handle this specialized +i task. In past kernels, it has managed to make the process of +i getting an unused number surprisingly complex; the 4.19 kernel +i has a new IDA API that simplifies things considerably. +i diff --git a/test/expected/LWN/0000764057.header.html b/test/expected/LWN/0000764057.header.html new file mode 100644 index 0000000..80f1739 --- /dev/null +++ b/test/expected/LWN/0000764057.header.html @@ -0,0 +1,20 @@ + + + + + + + + +
+

[$] IDA: simplifying the complex task of allocating integers

+
([Kernel] Sep 4, 2018 0:15 UTC (Tue) (corbet))
+
+
+ It is common for kernel code to generate unique integers for identifiers. When one plugs in a flash drive, it will show up as /dev/sdN; that N (a letter derived from a number) must be generated in the kernel, and it should not already be in use for another drive or unpleasant things will happen. One might think that generating such numbers would not be a difficult task, but that turns out not to be the case, especially in situations where many numbers must be tracked. The IDA (for "ID allocator", perhaps) API exists to handle this specialized task. In past kernels, it has managed to make the process of getting an unused number surprisingly complex; the 4.19 kernel has a new IDA API that simplifies things considerably. +
+
+
+ diff --git a/test/expected/LWN/0000764057.html b/test/expected/LWN/0000764057.html new file mode 100644 index 0000000..cba93c8 --- /dev/null +++ b/test/expected/LWN/0000764057.html @@ -0,0 +1,25 @@ + + + + + + + + +
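
To make the "simplifies things considerably" claim in the teaser above more concrete, here is a minimal sketch of allocating and releasing an identifier with the ida_alloc()/ida_free() interface added in that development cycle (as far as I can tell, the new API the teaser refers to); the fragment is illustrative and is not taken from the article itself:

    #include <linux/idr.h>

    static DEFINE_IDA(my_ida);      /* statically initialized ID allocator */

    static int example_get_id(void)
    {
            /* Allocate the lowest free ID; the allocator handles its own
             * locking and memory allocation, so no pre-loading step is
             * needed. */
            int id = ida_alloc(&my_ida, GFP_KERNEL);

            if (id < 0)
                    pr_err("ID allocation failed: %d\n", id);
            return id;
    }

    static void example_put_id(int id)
    {
            ida_free(&my_ida, id);  /* return the ID for reuse */
    }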
+

[$] IDA: simplifying the complex task of allocating integers

+
([Kernel] Sep 4, 2018 0:15 UTC (Tue) (corbet))
+
+ +
+
+ [$] Sorry, this article is currently available to LWN suscribers only [https://lwn.net/subscribe/]. +
+
+
+ diff --git a/test/expected/LWN/0000764130 b/test/expected/LWN/0000764130 new file mode 100644 index 0000000..a01bd05 --- /dev/null +++ b/test/expected/LWN/0000764130 @@ -0,0 +1,263 @@ + SECURITY UPDATES FOR TUESDAY + + [Security] Sep 4, 2018 15:14 UTC (Tue) (ris) + + o News link: https://lwn.net/Articles/764130 + o Source link: + + + Dist. + + ID + + Release + + Package + + Date + + openSUSE + + [1]openSUSE-SU-2018:2600-1 + + 15.0 + + ImageMagick + + 2018-09-04 + + openSUSE + + [2]openSUSE-SU-2018:2597-1 + + 42.3 + + libressl + + 2018-09-04 + + openSUSE + + [3]openSUSE-SU-2018:2599-1 + + 15.0 + + postgresql10 + + 2018-09-04 + + openSUSE + + [4]openSUSE-SU-2018:2598-1 + + 15.0 + + spice + + 2018-09-04 + + openSUSE + + [5]openSUSE-SU-2018:2602-1 + + 42.3 + + spice + + 2018-09-04 + + openSUSE + + [6]openSUSE-SU-2018:2601-1 + + 42.3 + + spice-gtk + + 2018-09-04 + + Red Hat + + [7]RHSA-2018:2616-01 + + EL7 + + RHGS WA + + 2018-09-04 + + Red Hat + + [8]RHSA-2018:2608-01 + + EL6 + + Red Hat Gluster Storage + + 2018-09-04 + + Red Hat + + [9]RHSA-2018:2607-01 + + EL7 + + Red Hat Gluster Storage + + 2018-09-04 + + Red Hat + + [10]RHSA-2018:2626-01 + + EL7 + + Red Hat Virtualization + + 2018-09-04 + + Red Hat + + [11]RHSA-2018:2615-01 + + EL7 + + collectd + + 2018-09-04 + + Red Hat + + [12]RHSA-2018:2645-01 + + EL6.7 + + kernel + + 2018-09-04 + + Red Hat + + [13]RHSA-2018:2643-01 + + EL7 + + rhvm-appliance + + 2018-09-04 + + Red Hat + + [14]RHSA-2018:2612-01 + + EL6 + + samba + + 2018-09-04 + + Red Hat + + [15]RHSA-2018:2613-01 + + EL7 + + samba + + 2018-09-04 + + SUSE + + [16]SUSE-SU-2018:2603-1 + + OS7 + + crowbar, crowbar-core, crowbar-ha, crowbar-openstack, + crowbar-ui + + 2018-09-04 + + SUSE + + [17]SUSE-SU-2018:2596-1 + + SLE12 + + kernel + + 2018-09-03 + + SUSE + + [18]SUSE-SU-2018:2595-1 + + SLE12 + + spice + + 2018-09-03 + + SUSE + + [19]SUSE-SU-2018:2594-1 + + SLE12 + + spice-gtk + + 2018-09-03 + + SUSE + + [20]SUSE-SU-2018:2593-1 + + SLE12 + + spice-gtk + + 2018-09-03 + + + + [1] https://lwn.net/Articles/764119/ + + [2] https://lwn.net/Articles/764120/ + + [3] https://lwn.net/Articles/764121/ + + [4] https://lwn.net/Articles/764122/ + + [5] https://lwn.net/Articles/764123/ + + [6] https://lwn.net/Articles/764124/ + + [7] https://lwn.net/Articles/764115/ + + [8] https://lwn.net/Articles/764113/ + + [9] https://lwn.net/Articles/764112/ + + [10] https://lwn.net/Articles/764114/ + + [11] https://lwn.net/Articles/764110/ + + [12] https://lwn.net/Articles/764111/ + + [13] https://lwn.net/Articles/764116/ + + [14] https://lwn.net/Articles/764117/ + + [15] https://lwn.net/Articles/764118/ + + [16] https://lwn.net/Articles/764125/ + + [17] https://lwn.net/Articles/764126/ + + [18] https://lwn.net/Articles/764127/ + + [19] https://lwn.net/Articles/764129/ + + [20] https://lwn.net/Articles/764128/ + + + diff --git a/test/expected/LWN/0000764130.header b/test/expected/LWN/0000764130.header new file mode 100644 index 0000000..af3b6b3 --- /dev/null +++ b/test/expected/LWN/0000764130.header @@ -0,0 +1,10 @@ +0Security updates for Tuesday null/LWN/0000764130 70 +i [Security] Sep 4, 2018 15:14 UTC (Tue) (ris) +i +i Security updates have been issued by openSUSE (ImageMagick, +i libressl, postgresql10, spice, and spice-gtk), Red Hat +i (collectd, kernel, Red Hat Gluster Storage, Red Hat +i Virtualization, RHGS WA, rhvm-appliance, and samba), and SUSE +i (crowbar, crowbar-core, crowbar-ha, crowbar-openstack, +i crowbar-ui, kernel, spice, and spice-gtk). 
+i diff --git a/test/expected/LWN/0000764130.header.html b/test/expected/LWN/0000764130.header.html new file mode 100644 index 0000000..792d886 --- /dev/null +++ b/test/expected/LWN/0000764130.header.html @@ -0,0 +1,20 @@ + + + + + + + + +
+

Security updates for Tuesday

+
([Security] Sep 4, 2018 15:14 UTC (Tue) (ris))
+
+
+ Security updates have been issued by openSUSE (ImageMagick, libressl, postgresql10, spice, and spice-gtk), Red Hat (collectd, kernel, Red Hat Gluster Storage, Red Hat Virtualization, RHGS WA, rhvm-appliance, and samba), and SUSE (crowbar, crowbar-core, crowbar-ha, crowbar-openstack, crowbar-ui, kernel, spice, and spice-gtk). +
+
+
+ diff --git a/test/expected/LWN/0000764130.html b/test/expected/LWN/0000764130.html new file mode 100644 index 0000000..cd24a80 --- /dev/null +++ b/test/expected/LWN/0000764130.html @@ -0,0 +1,25 @@ + + + + + + + + +
+

Security updates for Tuesday

+
([Security] Sep 4, 2018 15:14 UTC (Tue) (ris))
+
+ +
+
+ Dist.

ID

Release

Package

Date

openSUSE

[1]openSUSE-SU-2018:2600-1

15.0

ImageMagick

2018-09-04

openSUSE

[2]openSUSE-SU-2018:2597-1

42.3

libressl

2018-09-04

openSUSE

[3]openSUSE-SU-2018:2599-1

15.0

postgresql10

2018-09-04

openSUSE

[4]openSUSE-SU-2018:2598-1

15.0

spice

2018-09-04

openSUSE

[5]openSUSE-SU-2018:2602-1

42.3

spice

2018-09-04

openSUSE

[6]openSUSE-SU-2018:2601-1

42.3

spice-gtk

2018-09-04

Red Hat

[7]RHSA-2018:2616-01

EL7

RHGS WA

2018-09-04

Red Hat

[8]RHSA-2018:2608-01

EL6

Red Hat Gluster Storage

2018-09-04

Red Hat

[9]RHSA-2018:2607-01

EL7

Red Hat Gluster Storage

2018-09-04

Red Hat

[10]RHSA-2018:2626-01

EL7

Red Hat Virtualization

2018-09-04

Red Hat

[11]RHSA-2018:2615-01

EL7

collectd

2018-09-04

Red Hat

[12]RHSA-2018:2645-01

EL6.7

kernel

2018-09-04

Red Hat

[13]RHSA-2018:2643-01

EL7

rhvm-appliance

2018-09-04

Red Hat

[14]RHSA-2018:2612-01

EL6

samba

2018-09-04

Red Hat

[15]RHSA-2018:2613-01

EL7

samba

2018-09-04

SUSE

[16]SUSE-SU-2018:2603-1

OS7

crowbar, crowbar-core, crowbar-ha, crowbar-openstack, crowbar-ui

2018-09-04

SUSE

[17]SUSE-SU-2018:2596-1

SLE12

kernel

2018-09-03

SUSE

[18]SUSE-SU-2018:2595-1

SLE12

spice

2018-09-03

SUSE

[19]SUSE-SU-2018:2594-1

SLE12

spice-gtk

2018-09-03

SUSE

[20]SUSE-SU-2018:2593-1

SLE12

spice-gtk

2018-09-03



[1] https://lwn.net/Articles/764119/

[2] https://lwn.net/Articles/764120/

[3] https://lwn.net/Articles/764121/

[4] https://lwn.net/Articles/764122/

[5] https://lwn.net/Articles/764123/

[6] https://lwn.net/Articles/764124/

[7] https://lwn.net/Articles/764115/

[8] https://lwn.net/Articles/764113/

[9] https://lwn.net/Articles/764112/

[10] https://lwn.net/Articles/764114/

[11] https://lwn.net/Articles/764110/

[12] https://lwn.net/Articles/764111/

[13] https://lwn.net/Articles/764116/

[14] https://lwn.net/Articles/764117/

[15] https://lwn.net/Articles/764118/

[16] https://lwn.net/Articles/764125/

[17] https://lwn.net/Articles/764126/

[18] https://lwn.net/Articles/764127/

[19] https://lwn.net/Articles/764129/

[20] https://lwn.net/Articles/764128/ +
+
+
+ diff --git a/test/expected/LWN/0000764131 b/test/expected/LWN/0000764131 new file mode 100644 index 0000000..94161c5 --- /dev/null +++ b/test/expected/LWN/0000764131 @@ -0,0 +1,13 @@ + [$] LEARNING ABOUT GO INTERNALS AT GOPHERCON + + [Front] Sep 5, 2018 19:20 UTC (Wed) (jake) + + o News link: https://lwn.net/Articles/764131 + o Source link: + + + [$] Sorry, this article is currently available to LWN + suscribers only [https://lwn.net/subscribe/]. + + + diff --git a/test/expected/LWN/0000764131.header b/test/expected/LWN/0000764131.header new file mode 100644 index 0000000..402f3cd --- /dev/null +++ b/test/expected/LWN/0000764131.header @@ -0,0 +1,14 @@ +0[$] Learning about Go internals at GopherCon null/LWN/0000764131 70 +i [Front] Sep 5, 2018 19:20 UTC (Wed) (jake) +i +i GopherCon is the major conference for the Go language, +i attended by 1600 dedicated "gophers", as the members of its +i community like to call themselves. Held for the last five +i years in Denver, it attracts programmers, open-source +i contributors, and technical managers from all over North +i America and the world. GopherCon's highly-technical program is +i an intense mix of Go internals and programming tutorials, a +i few of which we will explore in this article. Subscribers can +i read on for a report from GopherCon by guest author Josh +i Berkus. +i diff --git a/test/expected/LWN/0000764131.header.html b/test/expected/LWN/0000764131.header.html new file mode 100644 index 0000000..763ec98 --- /dev/null +++ b/test/expected/LWN/0000764131.header.html @@ -0,0 +1,20 @@ + + + + + + + + +
+

[$] Learning about Go internals at GopherCon

+
([Front] Sep 5, 2018 19:20 UTC (Wed) (jake))
+
+
+ GopherCon is the major conference for the Go language, attended by 1600 dedicated "gophers", as the members of its community like to call themselves. Held for the last five years in Denver, it attracts programmers, open-source contributors, and technical managers from all over North America and the world. GopherCon's highly-technical program is an intense mix of Go internals and programming tutorials, a few of which we will explore in this article. Subscribers can read on for a report from GopherCon by guest author Josh Berkus. +
+
+
+ diff --git a/test/expected/LWN/0000764131.html b/test/expected/LWN/0000764131.html new file mode 100644 index 0000000..ca9872f --- /dev/null +++ b/test/expected/LWN/0000764131.html @@ -0,0 +1,25 @@ + + + + + + + + +
+

[$] Learning about Go internals at GopherCon

+
([Front] Sep 5, 2018 19:20 UTC (Wed) (jake))
+
+ +
+
+ [$] Sorry, this article is currently available to LWN suscribers only [https://lwn.net/subscribe/]. +
+
+
+ diff --git a/test/expected/LWN/0000764182 b/test/expected/LWN/0000764182 new file mode 100644 index 0000000..4a44393 --- /dev/null +++ b/test/expected/LWN/0000764182 @@ -0,0 +1,92 @@ + SECURITY UPDATES FOR WEDNESDAY + + [Security] Sep 5, 2018 15:01 UTC (Wed) (ris) + + o News link: https://lwn.net/Articles/764182 + o Source link: + + + Dist. + + ID + + Release + + Package + + Date + + Debian + + [1]DSA-4284-1 + + stable + + lcms2 + + 2018-09-04 + + openSUSE + + [2]openSUSE-SU-2018:2623-1 + + 42.3 + + yubico-piv-tool + + 2018-09-05 + + Oracle + + [3]ELSA-2018-4208 + + OL6 + + kernel + + 2018-09-04 + + Oracle + + [4]ELSA-2018-4208 + + OL7 + + kernel + + 2018-09-04 + + SUSE + + [5]SUSE-SU-2018:2608-1 + + cobbler + + 2018-09-04 + + SUSE + + [6]SUSE-SU-2018:2615-1 + + SLE11 + + kvm + + 2018-09-05 + + + + [1] https://lwn.net/Articles/764176/ + + [2] https://lwn.net/Articles/764177/ + + [3] https://lwn.net/Articles/764178/ + + [4] https://lwn.net/Articles/764179/ + + [5] https://lwn.net/Articles/764180/ + + [6] https://lwn.net/Articles/764181/ + + + diff --git a/test/expected/LWN/0000764182.header b/test/expected/LWN/0000764182.header new file mode 100644 index 0000000..feedb1b --- /dev/null +++ b/test/expected/LWN/0000764182.header @@ -0,0 +1,7 @@ +0Security updates for Wednesday null/LWN/0000764182 70 +i [Security] Sep 5, 2018 15:01 UTC (Wed) (ris) +i +i Security updates have been issued by Debian (lcms2), openSUSE +i (yubico-piv-tool), Oracle (kernel), and SUSE (cobbler and +i kvm). +i diff --git a/test/expected/LWN/0000764182.header.html b/test/expected/LWN/0000764182.header.html new file mode 100644 index 0000000..4958638 --- /dev/null +++ b/test/expected/LWN/0000764182.header.html @@ -0,0 +1,20 @@ + + + + + + + + +
+

Security updates for Wednesday

+
([Security] Sep 5, 2018 15:01 UTC (Wed) (ris))
+
+
+ Security updates have been issued by Debian (lcms2), openSUSE (yubico-piv-tool), Oracle (kernel), and SUSE (cobbler and kvm). +
+
+
+ diff --git a/test/expected/LWN/0000764182.html b/test/expected/LWN/0000764182.html new file mode 100644 index 0000000..76b7e83 --- /dev/null +++ b/test/expected/LWN/0000764182.html @@ -0,0 +1,25 @@ + + + + + + + + +
+

Security updates for Wednesday

+
([Security] Sep 5, 2018 15:01 UTC (Wed) (ris))
+
+ +
+
+ Dist.

ID

Release

Package

Date

Debian

[1]DSA-4284-1

stable

lcms2

2018-09-04

openSUSE

[2]openSUSE-SU-2018:2623-1

42.3

yubico-piv-tool

2018-09-05

Oracle

[3]ELSA-2018-4208

OL6

kernel

2018-09-04

Oracle

[4]ELSA-2018-4208

OL7

kernel

2018-09-04

SUSE

[5]SUSE-SU-2018:2608-1

cobbler

2018-09-04

SUSE

[6]SUSE-SU-2018:2615-1

SLE11

kvm

2018-09-05



[1] https://lwn.net/Articles/764176/

[2] https://lwn.net/Articles/764177/

[3] https://lwn.net/Articles/764178/

[4] https://lwn.net/Articles/764179/

[5] https://lwn.net/Articles/764180/

[6] https://lwn.net/Articles/764181/ +
+
+
+ diff --git a/test/expected/LWN/0000764184 b/test/expected/LWN/0000764184 new file mode 100644 index 0000000..ff6288f --- /dev/null +++ b/test/expected/LWN/0000764184 @@ -0,0 +1,26 @@ + A SET OF STABLE KERNELS + + [Kernel] Sep 5, 2018 15:15 UTC (Wed) (ris) + + o News link: https://lwn.net/Articles/764184/ + o Source link: + + + Greg Kroah-Hartman has released stable kernels [1]4.18.6 , + [2]4.14.68 , [3]4.9.125 , [4]4.4.154 , and [5]3.18.121 . They + all contain important fixes and users should upgrade. + + + + [1] https://lwn.net/Articles/764185/ + + [2] https://lwn.net/Articles/764186/ + + [3] https://lwn.net/Articles/764187/ + + [4] https://lwn.net/Articles/764188/ + + [5] https://lwn.net/Articles/764189/ + + + diff --git a/test/expected/LWN/0000764184.header b/test/expected/LWN/0000764184.header new file mode 100644 index 0000000..91c215e --- /dev/null +++ b/test/expected/LWN/0000764184.header @@ -0,0 +1,7 @@ +0A set of stable kernels null/LWN/0000764184 70 +i [Kernel] Sep 5, 2018 15:15 UTC (Wed) (ris) +i +i Greg Kroah-Hartman has released stable kernels 4.18.6, +i 4.14.68, 4.9.125, 4.4.154, and 3.18.121. They all contain +i important fixes and users should upgrade. +i diff --git a/test/expected/LWN/0000764184.header.html b/test/expected/LWN/0000764184.header.html new file mode 100644 index 0000000..7c47529 --- /dev/null +++ b/test/expected/LWN/0000764184.header.html @@ -0,0 +1,20 @@ + + + + + + + + +
+

A set of stable kernels

+
([Kernel] Sep 5, 2018 15:15 UTC (Wed) (ris))
+
+
+ Greg Kroah-Hartman has released stable kernels 4.18.6, 4.14.68, 4.9.125, 4.4.154, and 3.18.121. They all contain important fixes and users should upgrade. +
+
+
+ diff --git a/test/expected/LWN/0000764184.html b/test/expected/LWN/0000764184.html new file mode 100644 index 0000000..94aed84 --- /dev/null +++ b/test/expected/LWN/0000764184.html @@ -0,0 +1,25 @@ + + + + + + + + +
+

A set of stable kernels

+
([Kernel] Sep 5, 2018 15:15 UTC (Wed) (ris))
+
+ +
+
+ Greg Kroah-Hartman has released stable kernels [1]4.18.6 , [2]4.14.68 , [3]4.9.125 , [4]4.4.154 , and [5]3.18.121 . They all contain important fixes and users should upgrade.



[1] https://lwn.net/Articles/764185/

[2] https://lwn.net/Articles/764186/

[3] https://lwn.net/Articles/764187/

[4] https://lwn.net/Articles/764188/

[5] https://lwn.net/Articles/764189/ +
+
+
+ diff --git a/test/expected/LWN/0000764200 b/test/expected/LWN/0000764200 new file mode 100644 index 0000000..20882cb --- /dev/null +++ b/test/expected/LWN/0000764200 @@ -0,0 +1,13 @@ + [$] WRITING NETWORK FLOW DISSECTORS IN BPF + + [Kernel] Sep 6, 2018 15:59 UTC (Thu) (corbet) + + o News link: https://lwn.net/Articles/764200 + o Source link: + + + [$] Sorry, this article is currently available to LWN + suscribers only [https://lwn.net/subscribe/]. + + + diff --git a/test/expected/LWN/0000764200.header b/test/expected/LWN/0000764200.header new file mode 100644 index 0000000..9cd70e2 --- /dev/null +++ b/test/expected/LWN/0000764200.header @@ -0,0 +1,13 @@ +0[$] Writing network flow dissectors in BPF null/LWN/0000764200 70 +i [Kernel] Sep 6, 2018 15:59 UTC (Thu) (corbet) +i +i Network packet headers contain a great deal of information, +i but the kernel often only needs a subset of that information +i to be able to perform filtering or associate any given packet +i with a flow. The piece of code that follows the different +i layers of packet encapsulation to find the important data is +i called a flow dissector. In current Linux kernels, the flow +i dissector is written in C. A patch set has been proposed +i recently to implement it in BPF with the clear goal of +i improving security, flexibility, and maybe even performance. +i diff --git a/test/expected/LWN/0000764200.header.html b/test/expected/LWN/0000764200.header.html new file mode 100644 index 0000000..cfcf95a --- /dev/null +++ b/test/expected/LWN/0000764200.header.html @@ -0,0 +1,20 @@ + + + + + + + + +
+

[$] Writing network flow dissectors in BPF

+
([Kernel] Sep 6, 2018 15:59 UTC (Thu) (corbet))
+
+
+ Network packet headers contain a great deal of information, but the kernel often only needs a subset of that information to be able to perform filtering or associate any given packet with a flow. The piece of code that follows the different layers of packet encapsulation to find the important data is called a flow dissector. In current Linux kernels, the flow dissector is written in C. A patch set has been proposed recently to implement it in BPF with the clear goal of improving security, flexibility, and maybe even performance. +
+
+
+ diff --git a/test/expected/LWN/0000764200.html b/test/expected/LWN/0000764200.html new file mode 100644 index 0000000..800444f --- /dev/null +++ b/test/expected/LWN/0000764200.html @@ -0,0 +1,25 @@ + + + + + + + + +
+

[$] Writing network flow dissectors in BPF

+
([Kernel] Sep 6, 2018 15:59 UTC (Thu) (corbet))
+
+ +
+
+ [$] Sorry, this article is currently available to LWN suscribers only [https://lwn.net/subscribe/]. +
+
+
+ diff --git a/test/expected/LWN/0000764202 b/test/expected/LWN/0000764202 new file mode 100644 index 0000000..b690a82 --- /dev/null +++ b/test/expected/LWN/0000764202 @@ -0,0 +1,23 @@ + FIREFOX 62.0 RELEASED + + [Development] Sep 5, 2018 17:31 UTC (Wed) (ris) + + o News link: https://lwn.net/Articles/764202/ + o Source link: + + + Mozilla has released Firefox 62.0, with several new features. + The Firefox Home (default New Tab) allows users to display up + to 4 rows of top sites, Pocket stories, and highlights; for + those using containers there is menu option to reopen a tab in + a different container; Firefox 63 will remove all trust for + Symantec-issued certificates, and it is optional in Firefox + 62; FreeBSD support for WebAuthn was added; and more. See the + [1]release notes for details. + + + + [1] https://www.mozilla.org/en-US/firefox/62.0/releasenotes/ + + + diff --git a/test/expected/LWN/0000764202.header b/test/expected/LWN/0000764202.header new file mode 100644 index 0000000..f8dc850 --- /dev/null +++ b/test/expected/LWN/0000764202.header @@ -0,0 +1,12 @@ +0Firefox 62.0 released null/LWN/0000764202 70 +i [Development] Sep 5, 2018 17:31 UTC (Wed) (ris) +i +i Mozilla has released Firefox 62.0, with several new features. +i The Firefox Home (default New Tab) allows users to display up +i to 4 rows of top sites, Pocket stories, and highlights; for +i those using containers there is menu option to reopen a tab in +i a different container; Firefox 63 will remove all trust for +i Symantec-issued certificates, and it is optional in Firefox +i 62; FreeBSD support for WebAuthn was added; and more. See the +i release notes for details. +i diff --git a/test/expected/LWN/0000764202.header.html b/test/expected/LWN/0000764202.header.html new file mode 100644 index 0000000..06e752e --- /dev/null +++ b/test/expected/LWN/0000764202.header.html @@ -0,0 +1,20 @@ + + + + + + + + +
+

Firefox 62.0 released

+
([Development] Sep 5, 2018 17:31 UTC (Wed) (ris))
+
+
+ Mozilla has released Firefox 62.0, with several new features. The Firefox Home (default New Tab) allows users to display up to 4 rows of top sites, Pocket stories, and highlights; for those using containers there is menu option to reopen a tab in a different container; Firefox 63 will remove all trust for Symantec-issued certificates, and it is optional in Firefox 62; FreeBSD support for WebAuthn was added; and more. See the release notes for details. +
+
+
+ diff --git a/test/expected/LWN/0000764202.html b/test/expected/LWN/0000764202.html new file mode 100644 index 0000000..de063a5 --- /dev/null +++ b/test/expected/LWN/0000764202.html @@ -0,0 +1,25 @@ + + + + + + + + +
+

Firefox 62.0 released

+
([Development] Sep 5, 2018 17:31 UTC (Wed) (ris))
+
+ +
+
+ Mozilla has released Firefox 62.0, with several new features. The Firefox Home (default New Tab) allows users to display up to 4 rows of top sites, Pocket stories, and highlights; for those using containers there is menu option to reopen a tab in a different container; Firefox 63 will remove all trust for Symantec-issued certificates, and it is optional in Firefox 62; FreeBSD support for WebAuthn was added; and more. See the [1]release notes for details.



[1] https://www.mozilla.org/en-US/firefox/62.0/releasenotes/ +
+
+
+ diff --git a/test/expected/LWN/0000764209 b/test/expected/LWN/0000764209 new file mode 100644 index 0000000..3fbbdd3 --- /dev/null +++ b/test/expected/LWN/0000764209 @@ -0,0 +1,13 @@ + [$] STRENGTHENING USER-SPACE SPECTRE V2 PROTECTION + + [Kernel] Sep 5, 2018 21:47 UTC (Wed) (corbet) + + o News link: https://lwn.net/Articles/764209 + o Source link: + + + [$] Sorry, this article is currently available to LWN + suscribers only [https://lwn.net/subscribe/]. + + + diff --git a/test/expected/LWN/0000764209.header b/test/expected/LWN/0000764209.header new file mode 100644 index 0000000..d0410ed --- /dev/null +++ b/test/expected/LWN/0000764209.header @@ -0,0 +1,15 @@ +0[$] Strengthening user-space Spectre v2 protection null/LWN/0000764209 70 +i [Kernel] Sep 5, 2018 21:47 UTC (Wed) (corbet) +i +i The Spectre variant 2 vulnerability allows the speculative +i execution of incorrect (in an attacker-controllable way) +i indirect branch predictions, resulting in the ability to +i exfiltrate information via side channels. The kernel has been +i reasonably well protected against this variant since shortly +i after its disclosure in January. It is, however, possible for +i user-space processes to use Spectre v2 to attack each other; +i thus far, the mainline kernel has offered relatively little +i protection against such attacks. A recent proposal from Jiri +i Kosina may change that situation, but there are still some +i disagreements around the details. +i diff --git a/test/expected/LWN/0000764209.header.html b/test/expected/LWN/0000764209.header.html new file mode 100644 index 0000000..0e94410 --- /dev/null +++ b/test/expected/LWN/0000764209.header.html @@ -0,0 +1,20 @@ + + + + + + + + +
+

[$] Strengthening user-space Spectre v2 protection

+
([Kernel] Sep 5, 2018 21:47 UTC (Wed) (corbet))
+
+
+ The Spectre variant 2 vulnerability allows the speculative execution of incorrect (in an attacker-controllable way) indirect branch predictions, resulting in the ability to exfiltrate information via side channels. The kernel has been reasonably well protected against this variant since shortly after its disclosure in January. It is, however, possible for user-space processes to use Spectre v2 to attack each other; thus far, the mainline kernel has offered relatively little protection against such attacks. A recent proposal from Jiri Kosina may change that situation, but there are still some disagreements around the details. +
+
+
+ diff --git a/test/expected/LWN/0000764209.html b/test/expected/LWN/0000764209.html new file mode 100644 index 0000000..c24f3ff --- /dev/null +++ b/test/expected/LWN/0000764209.html @@ -0,0 +1,25 @@ + + + + + + + + +
+

[$] Strengthening user-space Spectre v2 protection

+
([Kernel] Sep 5, 2018 21:47 UTC (Wed) (corbet))
+
+ +
+
+ [$] Sorry, this article is currently available to LWN suscribers only [https://lwn.net/subscribe/]. +
+
+
+ diff --git a/test/expected/LWN/0000764219 b/test/expected/LWN/0000764219 new file mode 100644 index 0000000..b91238e --- /dev/null +++ b/test/expected/LWN/0000764219 @@ -0,0 +1,161 @@ + GNOME 3.30 RELEASED + + [Development] Sep 5, 2018 21:17 UTC (Wed) (ris) + + o News link: https://lwn.net/Articles/764219 + o Source link: + + + The GNOME Project has announced the release of GNOME 3.30 + "Almería". " This release brings automatic updates in + Software, more games, and a new Podcasts application. + Improvements to core GNOME applications include a refined + location and search bar in Files, a [Thunderbolt] panel in + Settings, support for remoting using RDP in Boxes, and many + more. " The [1]release notes contain more information. + + From : + + Matthias Clasen via devel-announce-list + + + To : + + gnome-announce-list-AT-gnome.org, devel-announce-list-AT-gnome- + .org + + Subject : + + GNOME 3.30 released + + Date : + + Wed, 5 Sep 2018 16:41:54 -0400 + + Message-ID : + + + + Cc : + + Matthias Clasen + + Archive-link : + + [2]Article + + The GNOME Project is proud to announce the release of GNOME + 3.30, “Almería” + + This release brings automatic updates in Software, more games, + and a new + + Podcasts application. + + Improvements to core GNOME applications include a refined + location and + + search + + bar in Files, a Thunderbold panel in Settings, support for + remoting using + + RDP + + in Boxes, and many more. + + More information about the changes in GNOME 3.30 can be found + in the + + release notes: + + https://help.gnome.org/misc/release-notes/3.30/ + + For the release team, this release is particularly exciting + because it is + + the + + first one that has been produced and verified with our new CI + infrastructure + + in gitlab.gnome.org. + + GNOME 3.30 will be available shortly in many distributions. If + you want to + + try it + + today, you can use the soon-to-be-released Fedora 29 or the + openSUSE nightly + + live images which will both include GNOME 3.30 very soon. + + https://www.gnome.org/getting-gnome/ + + http://download.opensuse.org/repositories/GNOME:/Medias/i... + + To try the very latest developments in GNOME, you can also use + Fedora + + Silverblue, + + whose rawhide branch always includes the latest GNOME packages. + + https://kojipkgs.fedoraproject.org/compose/rawhide/latest... + + If you are interested in building applications for GNOME 3.30, + look for the + + GNOME 3.30 Flatpak SDK, which will be available in the + sdk.gnome.org + + repository + + soon. + + This six-month effort wouldn't have been possible without the + whole + + GNOME community, made of contributors and friends from all + around the + + world: developers, designers, documentation writers, usability + and + + accessibility specialists, translators, maintainers, students, + system + + administrators, companies, artists, testers and last, but not + least, our + + users. + + GNOME would not exist without all of you. Thank you to + everyone! + + Our next release, GNOME 3.32, is planned for March 2019. Until + then, + + enjoy GNOME 3.30! 
+ + The GNOME Release Team + + -- + + devel-announce-list mailing list + + devel-announce-list@gnome.org + + https://mail.gnome.org/mailman/listinfo/devel-announce-list + + + + [1] https://help.gnome.org/misc/release-notes/3.30/ + + [2] http://www.mail-archive.com/search?l=mid&q=CAFwd_vCdnMhopZ- + sZMq2M-N7DfQbUheTCfDb--Lgn6rrAXPyfdQ%40mail.gmail.com + + + diff --git a/test/expected/LWN/0000764219.header b/test/expected/LWN/0000764219.header new file mode 100644 index 0000000..4985147 --- /dev/null +++ b/test/expected/LWN/0000764219.header @@ -0,0 +1,11 @@ +0GNOME 3.30 released null/LWN/0000764219 70 +i [Development] Sep 5, 2018 21:17 UTC (Wed) (ris) +i +i The GNOME Project has announced the release of GNOME 3.30 +i "Almería". "This release brings automatic updates in Software, +i more games, and a new Podcasts application. Improvements to +i core GNOME applications include a refined location and search +i bar in Files, a [Thunderbolt] panel in Settings, support for +i remoting using RDP in Boxes, and many more." The release notes +i contain more information. +i diff --git a/test/expected/LWN/0000764219.header.html b/test/expected/LWN/0000764219.header.html new file mode 100644 index 0000000..b143c4c --- /dev/null +++ b/test/expected/LWN/0000764219.header.html @@ -0,0 +1,20 @@ + + + + + + + + +
+

GNOME 3.30 released

+
([Development] Sep 5, 2018 21:17 UTC (Wed) (ris))
+
+
+ The GNOME Project has announced the release of GNOME 3.30 "Almería". "This release brings automatic updates in Software, more games, and a new Podcasts application. Improvements to core GNOME applications include a refined location and search bar in Files, a [Thunderbolt] panel in Settings, support for remoting using RDP in Boxes, and many more." The release notes contain more information. +
+
+
+ diff --git a/test/expected/LWN/0000764219.html b/test/expected/LWN/0000764219.html new file mode 100644 index 0000000..d82900d --- /dev/null +++ b/test/expected/LWN/0000764219.html @@ -0,0 +1,25 @@ + + + + + + + + +
+

GNOME 3.30 released

+
([Development] Sep 5, 2018 21:17 UTC (Wed) (ris))
+
+ +
+
+ The GNOME Project has announced the release of GNOME 3.30 "Almería". " This release brings automatic updates in Software, more games, and a new Podcasts application. Improvements to core GNOME applications include a refined location and search bar in Files, a [Thunderbolt] panel in Settings, support for remoting using RDP in Boxes, and many more. " The [1]release notes contain more information.

From :

Matthias Clasen via devel-announce-list <devel-announce-list-AT-gnome.org>

To :

gnome-announce-list-AT-gnome.org, devel-announce-list-AT-gnome.org

Subject :

GNOME 3.30 released

Date :

Wed, 5 Sep 2018 16:41:54 -0400

Message-ID :

<CAFwd_vCdnMhopZsZMq2M-N7DfQbUheTCfDb--Lgn6rrAXPyfdQ@mail.gmail.com>

Cc :

Matthias Clasen <matthias.clasen-AT-gmail.com>

Archive-link :

[2]Article

The GNOME Project is proud to announce the release of GNOME 3.30, “Almería”

This release brings automatic updates in Software, more games, and a new

Podcasts application.

Improvements to core GNOME applications include a refined location and

search

bar in Files, a Thunderbold panel in Settings, support for remoting using

RDP

in Boxes, and many more.

More information about the changes in GNOME 3.30 can be found in the

release notes:

https://help.gnome.org/misc/release-notes/3.30/

For the release team, this release is particularly exciting because it is

the

first one that has been produced and verified with our new CI infrastructure

in gitlab.gnome.org.

GNOME 3.30 will be available shortly in many distributions. If you want to

try it

today, you can use the soon-to-be-released Fedora 29 or the openSUSE nightly

live images which will both include GNOME 3.30 very soon.

https://www.gnome.org/getting-gnome/

http://download.opensuse.org/repositories/GNOME:/Medias/i...

To try the very latest developments in GNOME, you can also use Fedora

Silverblue,

whose rawhide branch always includes the latest GNOME packages.

https://kojipkgs.fedoraproject.org/compose/rawhide/latest...

If you are interested in building applications for GNOME 3.30, look for the

GNOME 3.30 Flatpak SDK, which will be available in the sdk.gnome.org

repository

soon.

This six-month effort wouldn't have been possible without the whole

GNOME community, made of contributors and friends from all around the

world: developers, designers, documentation writers, usability and

accessibility specialists, translators, maintainers, students, system

administrators, companies, artists, testers and last, but not least, our

users.

GNOME would not exist without all of you. Thank you to everyone!

Our next release, GNOME 3.32, is planned for March 2019. Until then,

enjoy GNOME 3.30!

The GNOME Release Team

--

devel-announce-list mailing list

devel-announce-list@gnome.org

https://mail.gnome.org/mailman/listinfo/devel-announce-list



[1] https://help.gnome.org/misc/release-notes/3.30/

[2] http://www.mail-archive.com/search?l=mid&q=CAFwd_vCdnMhopZsZMq2M-N7DfQbUheTCfDb--Lgn6rrAXPyfdQ%40mail.gmail.com +
+
+
+ diff --git a/test/expected/LWN/0000764300 b/test/expected/LWN/0000764300 new file mode 100644 index 0000000..64a081b --- /dev/null +++ b/test/expected/LWN/0000764300 @@ -0,0 +1,312 @@ + SECURITY UPDATES FOR THURSDAY + + [Security] Sep 6, 2018 13:55 UTC (Thu) (jake) + + o News link: https://lwn.net/Articles/764300 + o Source link: + + + Dist. + + ID + + Release + + Package + + Date + + Debian + + [1]DSA-4286-1 + + stable + + curl + + 2018-09-05 + + Debian + + [2]DLA-1494-1 + + LTS + + gdm3 + + 2018-09-05 + + Debian + + [3]DLA-1495-1 + + LTS + + git-annex + + 2018-09-05 + + Debian + + [4]DLA-1496-1 + + LTS + + lcms2 + + 2018-09-06 + + Debian + + [5]DSA-4285-1 + + stable + + sympa + + 2018-09-05 + + Fedora + + [6]FEDORA-2018-38bdbafa96 + + F28 + + discount + + 2018-09-06 + + Fedora + + [7]FEDORA-2018-fe437a98d6 + + F27 + + dolphin-emu + + 2018-09-06 + + Fedora + + [8]FEDORA-2018-5bf744beee + + F28 + + gd + + 2018-09-06 + + Fedora + + [9]FEDORA-2018-fac5420dd1 + + F27 + + obs-build + + 2018-09-06 + + Fedora + + [10]FEDORA-2018-fac5420dd1 + + F27 + + osc + + 2018-09-06 + + Fedora + + [11]FEDORA-2018-4f0b7d1251 + + F27 + + tcpflow + + 2018-09-06 + + Fedora + + [12]FEDORA-2018-5ad77cc979 + + F28 + + tcpflow + + 2018-09-06 + + Fedora + + [13]FEDORA-2018-7626df1731 + + F27 + + yara + + 2018-09-06 + + Fedora + + [14]FEDORA-2018-8344cb89ac + + F28 + + yara + + 2018-09-06 + + openSUSE + + [15]openSUSE-SU-2018:2628-1 + + 15.0 + + wireshark + + 2018-09-05 + + Slackware + + [16]SSA:2018-249-01 + + curl + + 2018-09-06 + + Slackware + + [17]SSA:2018-249-03 + + firefox + + 2018-09-06 + + Slackware + + [18]SSA:2018-249-02 + + ghostscript + + 2018-09-06 + + Slackware + + [19]SSA:2018-249-04 + + thunderbird + + 2018-09-06 + + SUSE + + [20]SUSE-SU-2018:2630-1 + + SLE15 + + apache-pdfbox + + 2018-09-06 + + SUSE + + [21]SUSE-SU-2018:2629-1 + + curl + + 2018-09-05 + + SUSE + + [22]SUSE-SU-2018:2632-1 + + OS7 SLE12 + + dovecot22 + + 2018-09-06 + + SUSE + + [23]SUSE-SU-2018:2631-1 + + OS7 SLE12 + + libvirt + + 2018-09-06 + + Ubuntu + + [24]USN-3759-2 + + 12.04 + + libtirpc + + 2018-09-05 + + Ubuntu + + [25]USN-3759-1 + + 14.04 16.04 18.04 + + libtirpc + + 2018-09-05 + + + + [1] https://lwn.net/Articles/764275/ + + [2] https://lwn.net/Articles/764276/ + + [3] https://lwn.net/Articles/764277/ + + [4] https://lwn.net/Articles/764278/ + + [5] https://lwn.net/Articles/764279/ + + [6] https://lwn.net/Articles/764280/ + + [7] https://lwn.net/Articles/764281/ + + [8] https://lwn.net/Articles/764282/ + + [9] https://lwn.net/Articles/764283/ + + [10] https://lwn.net/Articles/764284/ + + [11] https://lwn.net/Articles/764285/ + + [12] https://lwn.net/Articles/764286/ + + [13] https://lwn.net/Articles/764287/ + + [14] https://lwn.net/Articles/764288/ + + [15] https://lwn.net/Articles/764289/ + + [16] https://lwn.net/Articles/764290/ + + [17] https://lwn.net/Articles/764292/ + + [18] https://lwn.net/Articles/764291/ + + [19] https://lwn.net/Articles/764293/ + + [20] https://lwn.net/Articles/764294/ + + [21] https://lwn.net/Articles/764295/ + + [22] https://lwn.net/Articles/764296/ + + [23] https://lwn.net/Articles/764297/ + + [24] https://lwn.net/Articles/764298/ + + [25] https://lwn.net/Articles/764299/ + + + diff --git a/test/expected/LWN/0000764300.header b/test/expected/LWN/0000764300.header new file mode 100644 index 0000000..a34ccd1 --- /dev/null +++ b/test/expected/LWN/0000764300.header @@ -0,0 +1,10 @@ +0Security updates for Thursday null/LWN/0000764300 70 +i [Security] Sep 6, 2018 13:55 UTC (Thu) (jake) +i +i Security 
updates have been issued by Debian (curl, gdm3, +i git-annex, lcms2, and sympa), Fedora (discount, dolphin-emu, +i gd, obs-build, osc, tcpflow, and yara), openSUSE (wireshark), +i Slackware (curl, firefox, ghostscript, and thunderbird), SUSE +i (apache-pdfbox, curl, dovecot22, and libvirt), and Ubuntu +i (libtirpc). +i diff --git a/test/expected/LWN/0000764300.header.html b/test/expected/LWN/0000764300.header.html new file mode 100644 index 0000000..9bed822 --- /dev/null +++ b/test/expected/LWN/0000764300.header.html @@ -0,0 +1,20 @@ + + + + + + + + +
+

Security updates for Thursday

+
([Security] Sep 6, 2018 13:55 UTC (Thu) (jake))
+
+
+ Security updates have been issued by Debian (curl, gdm3, git-annex, lcms2, and sympa), Fedora (discount, dolphin-emu, gd, obs-build, osc, tcpflow, and yara), openSUSE (wireshark), Slackware (curl, firefox, ghostscript, and thunderbird), SUSE (apache-pdfbox, curl, dovecot22, and libvirt), and Ubuntu (libtirpc). +
+
+
+ diff --git a/test/expected/LWN/0000764300.html b/test/expected/LWN/0000764300.html new file mode 100644 index 0000000..968dbd3 --- /dev/null +++ b/test/expected/LWN/0000764300.html @@ -0,0 +1,25 @@ + + + + + + + + +
+

Security updates for Thursday

+
([Security] Sep 6, 2018 13:55 UTC (Thu) (jake))
+
+ +
+
+ Dist.

ID

Release

Package

Date

Debian

[1]DSA-4286-1

stable

curl

2018-09-05

Debian

[2]DLA-1494-1

LTS

gdm3

2018-09-05

Debian

[3]DLA-1495-1

LTS

git-annex

2018-09-05

Debian

[4]DLA-1496-1

LTS

lcms2

2018-09-06

Debian

[5]DSA-4285-1

stable

sympa

2018-09-05

Fedora

[6]FEDORA-2018-38bdbafa96

F28

discount

2018-09-06

Fedora

[7]FEDORA-2018-fe437a98d6

F27

dolphin-emu

2018-09-06

Fedora

[8]FEDORA-2018-5bf744beee

F28

gd

2018-09-06

Fedora

[9]FEDORA-2018-fac5420dd1

F27

obs-build

2018-09-06

Fedora

[10]FEDORA-2018-fac5420dd1

F27

osc

2018-09-06

Fedora

[11]FEDORA-2018-4f0b7d1251

F27

tcpflow

2018-09-06

Fedora

[12]FEDORA-2018-5ad77cc979

F28

tcpflow

2018-09-06

Fedora

[13]FEDORA-2018-7626df1731

F27

yara

2018-09-06

Fedora

[14]FEDORA-2018-8344cb89ac

F28

yara

2018-09-06

openSUSE

[15]openSUSE-SU-2018:2628-1

15.0

wireshark

2018-09-05

Slackware

[16]SSA:2018-249-01

curl

2018-09-06

Slackware

[17]SSA:2018-249-03

firefox

2018-09-06

Slackware

[18]SSA:2018-249-02

ghostscript

2018-09-06

Slackware

[19]SSA:2018-249-04

thunderbird

2018-09-06

SUSE

[20]SUSE-SU-2018:2630-1

SLE15

apache-pdfbox

2018-09-06

SUSE

[21]SUSE-SU-2018:2629-1

curl

2018-09-05

SUSE

[22]SUSE-SU-2018:2632-1

OS7 SLE12

dovecot22

2018-09-06

SUSE

[23]SUSE-SU-2018:2631-1

OS7 SLE12

libvirt

2018-09-06

Ubuntu

[24]USN-3759-2

12.04

libtirpc

2018-09-05

Ubuntu

[25]USN-3759-1

14.04 16.04 18.04

libtirpc

2018-09-05



[1] https://lwn.net/Articles/764275/

[2] https://lwn.net/Articles/764276/

[3] https://lwn.net/Articles/764277/

[4] https://lwn.net/Articles/764278/

[5] https://lwn.net/Articles/764279/

[6] https://lwn.net/Articles/764280/

[7] https://lwn.net/Articles/764281/

[8] https://lwn.net/Articles/764282/

[9] https://lwn.net/Articles/764283/

[10] https://lwn.net/Articles/764284/

[11] https://lwn.net/Articles/764285/

[12] https://lwn.net/Articles/764286/

[13] https://lwn.net/Articles/764287/

[14] https://lwn.net/Articles/764288/

[15] https://lwn.net/Articles/764289/

[16] https://lwn.net/Articles/764290/

[17] https://lwn.net/Articles/764292/

[18] https://lwn.net/Articles/764291/

[19] https://lwn.net/Articles/764293/

[20] https://lwn.net/Articles/764294/

[21] https://lwn.net/Articles/764295/

[22] https://lwn.net/Articles/764296/

[23] https://lwn.net/Articles/764297/

[24] https://lwn.net/Articles/764298/

[25] https://lwn.net/Articles/764299/ +
+
+
+ diff --git a/test/expected/LWN/0000764321 b/test/expected/LWN/0000764321 new file mode 100644 index 0000000..df52d21 --- /dev/null +++ b/test/expected/LWN/0000764321 @@ -0,0 +1,88 @@ + THE HIDDEN BENEFIT OF GIVING BACK TO OPEN SOURCE SOFTWARE + (WORKING KNOWLEDGE) + + [Briefs] Sep 6, 2018 16:56 UTC (Thu) (corbet) + + o News link: https://lwn.net/Articles/764321/ + o Source link: + + + The Harvard Business School's "Working Knowledge" site has + [1]an article arguing that it can pay for companies to allow + their developers to contribute back to the projects whose + software they use. " And that presents an interesting dilemma + for firms that rely heavily on open source. Should they allow + employees on company time to make updates and edits to the + software for community use that could be used by competitors? + New research by Assistant Professor Frank Nagle, a member of + the Strategy Unit at Harvard Business School, shows that + paying employees to contribute to such software boosts the + company’s productivity from using the software by as much as + 100 percent, when compared with free-riding competitors. " + + + + [1] https://hbswk.hbs.edu/item/the-hidden-benefit-of-giving-ba- + ck-to-open-source-software + + + ** The Hidden Benefit of Giving Back to Open Source Software + (Working Knowledge) + + This is no surprise to me. Most of the open source software + improvements that might help a competitor are too general in + nature to really be giving the other guys a competitive + advantage. + + For instance, if Lyft contributed Linux kernel or PHP or Apache + or whatever fixes, the benefit to Lyft of having that improved + expertise far exceeds the general benefit to competitor Uber. + + + ** The Hidden Benefit of Giving Back to Open Source Software + (Working Knowledge) + + This is no surprise to me. Most of the open source software + improvements that might help a competitor are too general in + nature to really be giving the other guys a competitive + advantage. + + For instance, if Lyft contributed Linux kernel or PHP or + Apache or whatever fixes, the benefit to Lyft of having that + improved expertise far exceeds the general benefit to + competitor Uber. + + + + ** The Hidden Benefit of Giving Back to Open Source Software + (Working Knowledge) + + Even having to debate it seems so farcical. If you're worried + about people who "do the same thing", the software they use + is not the main differentiator. How your company is + organized, how you treat your people and your customers, how + you organized projects etc are huge, and software is + ultimately minor. Fixes and changes to software? Incredibly + minor. + + + + ** The Hidden Benefit of Giving Back to Open Source Software + (Working Knowledge) + + Perhaps this is too dismissive, as there is the part about + letting your programmers do their job to the best of their + ability. That seems pretty big. + + + ** The Hidden Benefit of Giving Back to Open Source Software + (Working Knowledge) + + Perhaps this is too dismissive, as there is the part about + letting your programmers do their job to the best of their + ability. That seems pretty big. 
+ + + + + diff --git a/test/expected/LWN/0000764321.header b/test/expected/LWN/0000764321.header new file mode 100644 index 0000000..9cdc345 --- /dev/null +++ b/test/expected/LWN/0000764321.header @@ -0,0 +1,16 @@ +0The Hidden Benefit of Giving Back to Open Source Software (Working Knowledge) null/LWN/0000764321 70 +i [Briefs] Sep 6, 2018 16:56 UTC (Thu) (corbet) +i +i The Harvard Business School's "Working Knowledge" site has an +i article arguing that it can pay for companies to allow their +i developers to contribute back to the projects whose software +i they use. "And that presents an interesting dilemma for firms +i that rely heavily on open source. Should they allow employees +i on company time to make updates and edits to the software for +i community use that could be used by competitors? New research +i by Assistant Professor Frank Nagle, a member of the Strategy +i Unit at Harvard Business School, shows that paying employees +i to contribute to such software boosts the company’s +i productivity from using the software by as much as 100 +i percent, when compared with free-riding competitors." +i diff --git a/test/expected/LWN/0000764321.header.html b/test/expected/LWN/0000764321.header.html new file mode 100644 index 0000000..eb8b490 --- /dev/null +++ b/test/expected/LWN/0000764321.header.html @@ -0,0 +1,20 @@ + + + + + + + + +
+

The Hidden Benefit of Giving Back to Open Source Software (Working Knowledge)

+
([Briefs] Sep 6, 2018 16:56 UTC (Thu) (corbet))
+
+
+ The Harvard Business School's "Working Knowledge" site has an article arguing that it can pay for companies to allow their developers to contribute back to the projects whose software they use. "And that presents an interesting dilemma for firms that rely heavily on open source. Should they allow employees on company time to make updates and edits to the software for community use that could be used by competitors? New research by Assistant Professor Frank Nagle, a member of the Strategy Unit at Harvard Business School, shows that paying employees to contribute to such software boosts the company’s productivity from using the software by as much as 100 percent, when compared with free-riding competitors." +
+
+
+ diff --git a/test/expected/LWN/0000764321.html b/test/expected/LWN/0000764321.html new file mode 100644 index 0000000..a424258 --- /dev/null +++ b/test/expected/LWN/0000764321.html @@ -0,0 +1,50 @@ + + + + + + + + +
+

The Hidden Benefit of Giving Back to Open Source Software (Working Knowledge)

+
([Briefs] Sep 6, 2018 16:56 UTC (Thu) (corbet))
+
+ +
+
+ The Harvard Business School's "Working Knowledge" site has [1]an article arguing that it can pay for companies to allow their developers to contribute back to the projects whose software they use. " And that presents an interesting dilemma for firms that rely heavily on open source. Should they allow employees on company time to make updates and edits to the software for community use that could be used by competitors? New research by Assistant Professor Frank Nagle, a member of the Strategy Unit at Harvard Business School, shows that paying employees to contribute to such software boosts the company’s productivity from using the software by as much as 100 percent, when compared with free-riding competitors. "



[1] https://hbswk.hbs.edu/item/the-hidden-benefit-of-giving-back-to-open-source-software +
+
+
+

The Hidden Benefit of Giving Back to Open Source Software (Working Knowledge)

+
+

This is no surprise to me. Most of the open source software improvements that might help a competitor are too general in nature to really be giving the other guys a competitive advantage.

For instance, if Lyft contributed Linux kernel or PHP or Apache or whatever fixes, the benefit to Lyft of having that improved expertise far exceeds the general benefit to competitor Uber.

+
+

The Hidden Benefit of Giving Back to Open Source Software (Working Knowledge)

+
+

This is no surprise to me. Most of the open source software improvements that might help a competitor are too general in nature to really be giving the other guys a competitive advantage.

For instance, if Lyft contributed Linux kernel or PHP or Apache or whatever fixes, the benefit to Lyft of having that improved expertise far exceeds the general benefit to competitor Uber.

+
+
+

The Hidden Benefit of Giving Back to Open Source Software (Working Knowledge)

+
+

Even having to debate it seems so farcical. If you're worried about people who "do the same thing", the software they use is not the main differentiator. How your company is organized, how you treat your people and your customers, how you organized projects etc are huge, and software is ultimately minor. Fixes and changes to software? Incredibly minor.

+
+
+

The Hidden Benefit of Giving Back to Open Source Software (Working Knowledge)

+
+

Perhaps this is too dismissive, as there is the part about letting your programmers do their job to the best of their ability. That seems pretty big.

+
+

The Hidden Benefit of Giving Back to Open Source Software (Working Knowledge)

+
+

Perhaps this is too dismissive, as there is the part about letting your programmers do their job to the best of their ability. That seems pretty big.

+
+
+
+
+ diff --git a/test/source/LWN/Articles/763252.html b/test/source/LWN/Articles/763252.html new file mode 100644 index 0000000..6658210 --- /dev/null +++ b/test/source/LWN/Articles/763252.html @@ -0,0 +1,1864 @@ + + + LWN.net Weekly Edition for August 30, 2018 [LWN.net] + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+
+ User: Password:
| +
+ +
| +
+ +
+
+ +
+
+
+

LWN.net Weekly Edition for August 30, 2018

+
+
+

Welcome to the LWN.net Weekly Edition for August 30, 2018

+ +This edition contains the following feature content: +

+

+ +

+ This week's edition also includes these inner pages: +

+

    + +
  • Brief items: Brief news items from throughout the community. + +
  • Announcements: Newsletters, conferences, security updates, patches, and more. + +
+

+ Please enjoy this week's edition, and, as always, thank you for + supporting LWN.net. +

Comments (none posted) +

+

An introduction to the Julia language, part 1

+ +
+

August 28, 2018

+

This article was contributed by Lee Phillips

+
+

Julia is a young computer language +aimed at serving the needs of scientists, engineers, and other +practitioners of numerically intensive programming. It was first publicly +released in 2012. After an intense period of language development, version +1.0 was released on +August 8. The 1.0 release promises years of language +stability; users can be confident that developments in the 1.x series will +not break their code. + This is the first part of a two-part article introducing the world of Julia. + This part will introduce enough of the language syntax and constructs to + allow you to begin to write simple programs. The following installment will + acquaint you with the additional pieces needed to create real projects, and to + make use of Julia's ecosystem. + +

Goals and history

+ +

The Julia project has ambitious goals. It wants the language to perform +about as well as Fortran or C when running numerical algorithms, while +remaining as pleasant to program in as Python. I believe the project has +met these goals and is poised to see increasing adoption by numerical +researchers, especially now that an official, stable release is +available.

+ +

The Julia project maintains a micro-benchmark page that compares its +numerical performance against both statically compiled languages (C, +Fortran) and dynamically typed languages (R, Python). While it's certainly +possible to argue about the relevance and fairness of particular +benchmarks, the data overall supports the Julia team's contention that Julia +has generally achieved parity with Fortran and C; the benchmark +source code is available.

+ +

Julia began as research in computer science at MIT; its creators are +Alan Edelman, Stefan Karpinski, Jeff Bezanson, and Viral Shah. These four + remain active developers of the language. They, along with Keno Fischer, +co-founder and CTO of Julia +Computing, were kind enough to share their thoughts with us about +the language. I'll be drawing +on their comments later on; for now, let's get a taste of +what Julia code looks like.

+ +

Getting started

+ +

To explore Julia initially, start up its standard read-eval-print +loop (REPL) +by typing julia at the terminal, assuming that you have installed +it. You will then be +able to interact with what will seem to be an interpreted language — but, +behind the scenes, those commands are being compiled by a +just-in-time (JIT) compiler that uses the LLVM +compiler framework. This allows Julia to be interactive, while turning +the code into fast, native machine instructions. However, the JIT compiler +passes sometimes introduce noticeable delays at the REPL, especially when +using a function for the first time.

+ +

To run a Julia program non-interactively, execute a command like: +

+    $ julia script.jl <args>
+
+ +

Julia has all the usual data structures: numbers of various types +(including complex and rational numbers), multidimensional arrays, +dictionaries, strings, and characters. Functions are first-class: they can +be passed as arguments to other functions, can be members of arrays, +and so on.

+ +

Julia embraces Unicode. Strings, which are enclosed in double quotes, +are arrays of Unicode characters, which are enclosed in single quotes. The +"*" operator is used for string and character concatenation. Thus +'a' and 'β' are characters, and 'aβ' is a syntax error. "a" and +"β" are strings, as are "aβ", 'a' * 'β', and +"a" * "β" — all evaluate to the same string. + +

Variable and function names can contain non-ASCII characters. This, along +with Julia's clever syntax that understands numbers prepended to variables +to mean multiplication, goes a long way to allowing the numerical scientist +to write code that more closely resembles the compact mathematical notation +of the equations that usually lie behind it.

+ +
+    julia> ε₁ = 0.01
+    0.01
+
+    julia> ε₂ = 0.02
+    0.02
+
+    julia> 2ε₁ + 3ε₂
+    0.08
+
+ +

And where does Julia come down on the age-old debate of what do about +1/2? In Fortran and Python 2, this will get you 0, since 1 and 2 are +integers, and the result is rounded down to the integer 0. This was deemed +inconsistent, and confusing to some, so it was changed in Python 3 to +return 0.5 — which is what you +get in Julia, too.

+ +

While we're on the subject of fractions, Julia can handle rational +numbers, with a special syntax: 3//5 + 2//3 returns +19//15, while 3/5 + 2/3 +gets you the floating-point answer 1.2666666666666666. Internally, Julia +thinks of a rational number in its reduced form, so the expression +6//8 == 3//4 returns true, and numerator(6//8) returns +3.

+ +

Arrays

+ +

Arrays are enclosed in square brackets and indexed with an iterator that +can contain a step value:

+ +
+    julia> a = [1, 2, 3, 4, 5, 6]
+    6-element Array{Int64,1}:
+     1
+     2
+     3
+     4
+     5
+     6
+
+    julia> a[1:2:end]
+    3-element Array{Int64,1}:          
+     1
+     3
+     5
+
+ +

As you can see, indexing starts at one, and the useful end +index means the obvious thing. When you define a variable in the REPL, +Julia replies with the type and value of the assigned data; you can suppress this output by ending your input line with a semicolon.

+ +

Since arrays are such a vital part of numerical computation, and Julia +makes them easy to work with, we'll spend a bit more time with them than the other data structures.

+ +

To illustrate the syntax, we can start with a couple of 2D arrays, defined at the REPL:

+ +
+    julia> a = [1 2 3; 4 5 6]
+    2×3 Array{Int64,2}:
+     1  2  3
+     4  5  6
+
+    julia> z = [-1 -2 -3; -4 -5 -6];
+
+ +

Indexing is as expected:

+ +
+    julia> a[1, 2]
+    2
+
+ +

You can glue arrays together horizontally:

+ +
+    julia> [a z]
+    2×6 Array{Int64,2}:
+     1  2  3  -1  -2  -3
+     4  5  6  -4  -5  -6
+
+ +

And vertically:

+ +
+    julia> [a; z]
+    4×3 Array{Int64,2}:
+      1   2   3
+      4   5   6
+     -1  -2  -3
+     -4  -5  -6
+
+ +

Julia has all the usual operators for handling arrays, and linear +algebra functions that work with matrices (2D arrays). The linear +algebra functions are part of Julia's standard library, but need to be +imported with a command like "using LinearAlgebra", which is a detail +omitted from the current documentation. The functions include such things as +determinants, matrix inverses, eigenvalues and eigenvectors, many kinds of +matrix factorizations, etc. Julia has not reinvented the wheel here, but +wisely uses the LAPACK Fortran +library of battle-tested linear algebra routines.

+ +

The extension of arithmetic operators to arrays is usually intuitive:

+ +
+    julia> a + z
+    2×3 Array{Int64,2}:
+     0  0  0
+     0  0  0
+
+ +

And the numerical prepending syntax works with arrays, too:

+ +
+    julia> 3a + 4z
+    2×3 Array{Int64,2}:
+     -1  -2  -3
+     -4  -5  -6
+
+ +

Putting a multiplication operator between two matrices gets you matrix +multiplication:

+ +
+    julia> a * transpose(a)
+    2×2 Array{Int64,2}:
+     14  32
+     32  77
+
+ +

You can "broadcast" numbers to cover all the elements in an +array by prepending the usual arithmetic operators with a dot:

+ +
+    julia> 1 .+ a
+    2×3 Array{Int64,2}:
+     2  3  4
+     5  6  7
+
+ +

Note that the language only actually requires the dot for some +operators, but not for others, such as "*" and "/". The +reasons for this are arcane, and it probably makes sense to be consistent +and use the dot whenever you intend broadcasting. Note also that the +current version of the official documentation is incorrect in claiming that +you may omit the dot from "+" and "-"; in fact, this +now gives an error.

+ +

You can use the dot notation to turn any function into one that operates +on each element of an array:

+ +
+    julia> round.(sin.([0, π/2, π, 3π/2, 2π]))
+    5-element Array{Float64,1}:
+      0.0
+      1.0
+      0.0
+     -1.0
+     -0.0
+
+ +

The example above illustrates chaining two dotted functions +together. The Julia compiler turns expressions like this into +"fused" operations: instead of applying each function in turn to +create a new array that is passed to the next function, the compiler +combines the functions into a single compound function that is applied once +over the array, creating a significant optimization.

+ +

You can use this dot notation with any function, including your own, to +turn it into a version that operates element-wise over arrays.

+ +

Dictionaries (associative arrays) can be defined with several +syntaxes. Here's one:

+ +
+    julia> d1 = Dict("A"=>1, "B"=>2)
+    Dict{String,Int64} with 2 entries:
+      "B" => 2
+      "A" => 1
+
+ +

You may have noticed that the code snippets so far have not included any +type declarations. Every value in Julia has a type, but the compiler will +infer types if they are not specified. It is generally not necessary to +declare types for performance, but type declarations sometimes serve other +purposes, that we'll return to later. Julia has a deep and sophisticated +type system, including user-defined types and C-like structs. Types can +have behaviors associated with them, and can inherit behaviors from other +types. The best thing about Julia's type system is that you can ignore it +entirely, use just a few pieces of it, or spend weeks studying its +design.

+ +

Control flow

+ +

Julia code is organized in blocks, which can indicate control flow, +function definitions, and other code units. Blocks are terminated with the +end keyword, and indentation is not significant. Statements +are separated either with newlines or semicolons.

+ +

Julia has the typical control flow constructs; here is a +while block:

+ +
+    julia> i = 1;
+
+    julia> while i < 5
+	       print(i)
+	       global i = i + 1
+	   end
+    1234
+
+ +

Notice the global keyword. Most blocks in Julia introduce a +local scope for variables; without this keyword here, we would get an error +about an undefined variable.

+ +

Julia has the usual if statements and for +loops that use the same iterators that we introduced above for array +indexing. We can also iterate over collections:

+ +
+    julia> for i ∈ ['a', 'b', 'c']
+	       println(i)
+	   end
+    a
+    b
+    c
+
+ +

In place of the fancy math symbol in this for loop, we can +use "=" or "in". If you want to use +the math symbol but +have no convenient way to type it, the REPL will help you: type +"\in" and the TAB key, and the symbol appears; you can type many +LaTeX expressions into the +REPL in this way.

+ +

Development of Julia

+ +

The language is developed on GitHub, with over 700 contributors. The +Julia team mentioned in their email to us that the decision to use GitHub +has been particularly good for Julia, as it streamlined the process for +many of their contributors, who are scientists or domain experts in various +fields, rather than professional software developers.

+ +

The creators of Julia have published +[PDF] +a detailed “mission statement” for the language, describing their aims and +motivations. A key issue that they wanted their language to solve is what +they called the "two-language problem." This situation is +familiar to anyone who has used Python or another dynamic language on a +demanding numerical problem. To get good performance, you will wind up +rewriting the numerically intensive parts of the program in C or Fortran, +dealing with the interface between the two languages, and may still be +disappointed in the overhead presented by calling the foreign routines from +your original code. + +

+For Python, NumPy and SciPy wrap many +numerical routines, written in Fortran or C, for efficient use from that +language, but you can only take advantage of this if your calculation fits +the pattern of an available routine; in more general cases, where you will +have to write a loop over your data, you are stuck with Python's native +performance, which is orders of magnitude slower. If you switch to an +alternative, faster implementation of Python, such as PyPy, the numerical libraries may not be +compatible; NumPy became available for PyPy only within about the past +year.

+ +

Julia solves the two-language problem by being as expressive and simple +to program in as a dynamic scripting language, while having the native +performance of a static, compiled language. There is no need to write +numerical libraries in a second language, but C or Fortran library routines +can be called using a facility that Julia has built-in. Other languages, +such as Python or R, can also interoperate +easily with Julia using external packages.

+ +

Documentation

+ +

There are many resources to turn to to learn the language. There is an +extensive and detailed manual at Julia +headquarters, and this may be a good place to start. However, although the +first few chapters provide a gentle introduction, the material soon becomes +dense and, at times, hard to follow, with references to concepts that are +not explained until later chapters. Fortunately, there is a "learning" link at the +top of the Julia home page, which takes you to a long list of videos, +tutorials, books, articles, and classes both about Julia and that use Julia +in teaching subjects such a numerical analysis. There is also a fairly good + cheat-sheet [PDF], which was +just updated for v. 1.0.

+ +

If you're coming from Python, this +list of noteworthy differences between Python and Julia syntax will +probably be useful.

+ +

Some of the linked tutorials are in the form of Jupyter notebooks — indeed, +the name "Jupyter" is formed from "Julia", +"Python", and "R", which are the three original languages supported by +the interface. The Julia +kernel for Jupyter was recently upgraded to support v. 1.0. Judicious +sampling of a variety of documentation sources, combined with liberal +experimentation, may be the best way of learning the language. Jupyter +makes this experimentation more inviting for those who enjoy the web-based +interface, but the REPL that comes with Julia helps a great deal in this +regard by providing, for instance, TAB completion and an extensive help +system invoked by simply pressing the "?" key.

+ +

Stay tuned

+ +

+ The next installment in this two-part series will explain how Julia is + organized around the concept of "multiple dispatch". You will learn how to + create functions and make elementary use of Julia's type system. We'll see how + to install packages and use modules, and how to make graphs. Finally, Part 2 + will briefly survey the important topics of macros and distributed computing. +

Comments (80 posted) +

+

C considered dangerous

+ +
+ By Jake Edge
August 29, 2018 +
+LSS NA +
+

+At the North America edition of the 2018 +Linux Security Summit (LSS NA), which was held in late August in Vancouver, +Canada, Kees Cook gave a presentation on some of the dangers that come with +programs written in C. In particular, of course, the Linux kernel is +mostly written in C, which means that the security of our systems rests on +a somewhat dangerous foundation. But there are things that can be done to +help firm things up by "Making C Less Dangerous" as the title +of his talk suggested. +

+ +

+He began with a brief summary of the work that he and others are doing as +part of the Kernel +Self Protection Project (KSPP). The goal of the project is to get +kernel protections merged into the mainline. These protections are not +targeted at protecting user-space processes from other (possibly rogue) +processes, but are, instead, focused on protecting the kernel from +user-space code. There are around 12 organizations and ten individuals +working on roughly 20 different technologies as part of the KSPP, he said. The +progress has been "slow and steady", he said, which is how he thinks it +should go. +

+ + + + + +

+One of the main problems is that C is treated mostly like a fancy assembler. +The kernel developers do this because they want the kernel to be as fast +and as small as possible. There are other reasons, too, such as the need to do +architecture-specific tasks that lack a C API (e.g. setting up page tables, +switching to 64-bit mode). +

+ +

+But there is lots of undefined behavior in C. This "operational baggage" +can lead to various problems. In addition, C has a weak standard library +with multiple utility functions that have various pitfalls. In C, the content +of uninitialized automatic variables is undefined, but in the machine code that it +gets translated to, the value is whatever happened to be in that memory +location before. In C, a function pointer can be called even if the type +of the pointer does not match the type of the function being +called—assembly doesn't care, it just jumps to a location, he said. +

+ +
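To make those two hazards concrete, here is a small stand-alone sketch (not from the talk or the kernel); a compiler will accept it, perhaps with a warning, yet both the read of the uninitialized variable and the call through the mis-typed function pointer are undefined behavior:

    /* ub.c: two of the C behaviors described above (illustrative only) */
    #include <stdio.h>

    static int add(int a, int b) { return a + b; }

    int main(void)
    {
        int uninitialized;              /* holds whatever was in that stack slot */
        printf("%d\n", uninitialized);  /* undefined: the "value" is accidental */

        /* The cast silences the compiler; the generated code just jumps to the
         * address, even though the prototype does not match the real function. */
        int (*fp)(int) = (int (*)(int))add;
        printf("%d\n", fp(1));
        return 0;
    }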

+The APIs in the standard library are also bad in many cases. He asked: why +is there no argument to memcpy() to specify the maximum +destination length? He noted a recent blog +post from Raph Levien entitled "With Undefined Behavior, Anything is +Possible". That obviously resonated with Cook, as he pointed out his +T-shirt—with the title and artwork from the post. +

+ +
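As a sketch of the point about memcpy(): the function trusts whatever length it is given, so the destination bound has to be checked by hand. The copy_payload() helper below is hypothetical, only an illustration of what a destination-length argument would buy:

    /* bound.c: memcpy() has no idea how big the destination is (illustrative only) */
    #include <string.h>

    struct reply { char payload[64]; };

    /* Hypothetical checked wrapper: refuses lengths the destination cannot hold. */
    static int copy_payload(struct reply *r, const void *src, size_t n)
    {
        if (n > sizeof(r->payload))
            return -1;              /* this is the bound memcpy() itself never sees */
        memcpy(r->payload, src, n);
        return 0;
    }

    int main(void)
    {
        struct reply r;
        char big[128] = "far more data than struct reply can hold";

        return copy_payload(&r, big, sizeof(big)) ? 1 : 0;  /* rejected, not overflowed */
    }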

Less danger

+ +

+He then moved on to some things that kernel developers can do (and are +doing) to get away from some of the dangers of C. He began with +variable-length arrays (VLAs), which can be used to overflow the stack to +access +data outside of its region. Even if the stack has a guard page, VLAs can +be used to jump past it to write into other memory, which can then be used +by some other kind of attack. The C language is "perfectly fine with +this". It is easy to find uses of VLAs with the -Wvla flag, however. +

+ +
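A minimal sketch of the pattern -Wvla complains about (not kernel code): the stack allocation is sized by a runtime value, so a large or attacker-influenced n simply moves the stack pointer past any guard page.

    /* vla.c: compile with "gcc -Wvla -c vla.c" to see the warning (illustrative only) */
    #include <string.h>

    void handle(size_t n, const char *src)
    {
        char buf[n];            /* variable-length array: stack use decided at runtime */
        memcpy(buf, src, n);    /* nothing stops n from being enormous */
    }

    /* Roughly what such code gets converted into when a VLA is removed:
     *     char buf[256];
     *     if (n > sizeof(buf))
     *             return;
     */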

+But it turns out that VLAs are not just bad +from a security perspective, +they are also slow. In a micro-benchmark associated with a patch +removing a VLA, a 13% performance boost came from using a fixed-size +array. He dug in a bit further and found that much more code is being +generated to handle a VLA, which explains the speed increase. Since Linus +Torvalds has declared +that VLAs should be removed from the kernel because they cause security +problems and also slow the kernel down; Cook said "don't use VLAs". +

+ +

+Another problem area is switch statements, in particular where +there is no break for a case. That could mean that the +programmer expects and wants to fall through to the next case or it could +be that the break was simply forgotten. There is a way to get a +warning from the compiler for fall-throughs, but there needs to be a way to +mark those that are truly meant to be that way. A special fall-through +"statement" in the form of a comment is what has been agreed on within the +static-analysis community. He and others have been going through each of +the places where there is no break to add these comments (or a +break); they +have "found a lot of bugs this way", he said. +

+ +
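The comment convention he described looks roughly like this generic sketch; the marker tells both human reviewers and fall-through-warning tools that the missing break is deliberate:

    /* fallthrough.c: distinguishing intentional fall-through from a forgotten break */
    #include <stdio.h>

    static int classify(int c)
    {
        int score = 0;

        switch (c) {
        case 'a':
            score += 1;
            /* fall through */      /* intentional: 'a' also gets the 'b' handling */
        case 'b':
            score += 10;
            break;
        case 'c':
            score += 100;
            break;                  /* omitting this break would silently add 'd' handling */
        case 'd':
            score += 1000;
            break;
        }
        return score;
    }

    int main(void)
    {
        printf("%d %d\n", classify('a'), classify('c'));   /* prints "11 100" */
        return 0;
    }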

+Uninitialized local variables will generate a warning, but not if the +variable is passed in by reference. There are some GCC plugins that will +automatically initialize these variables, but there are also patches for +both GCC and Clang to provide a compiler option to do so. Neither of those +is upstream yet, but Torvalds has praised the effort so the kernel would +likely use the option. An interesting side +effect that came about while investigating this was a warning he got about +unreachable code when he +enabled the auto-initialization. There were two variables declared just +after a switch (and outside of any case), where they +would never be reached. +

+ +
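A small sketch of why the warning disappears in the by-reference case (illustrative, not kernel code): once the variable's address escapes, the compiler assumes the callee fills it in, but the error path below never does.

    /* byref.c: -Wuninitialized stays quiet once the variable's address escapes */
    #include <stdio.h>

    static int maybe_fill(int *out, int ok)
    {
        if (!ok)
            return -1;      /* error path: *out is left untouched */
        *out = 42;
        return 0;
    }

    int main(void)
    {
        int value;                      /* never initialized here */
        maybe_fill(&value, 0);          /* return code ignored... */
        printf("%d\n", value);          /* ...so this prints stale stack contents */
        return 0;
    }

Automatically initializing such locals, as the plugins and proposed compiler options do, turns the stale read into a predictable value.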

+Arithmetic overflow is another undefined behavior in C that can cause various + problems. GCC can check for signed overflow, which performs well +(the overhead is in the noise, he said), but adding warning messages for it does grow +the kernel by 6%; making the overflow abort, instead, only adds 0.1%. +Clang can check for both signed and unsigned overflow; signed overflow is +undefined, while unsigned overflow is defined, but often unexpected. +Marking places where unsigned overflow is expected is needed; it +would be nice to get those annotations put into the kernel, Cook said. +

+ +

+Explicit bounds checking is expensive. Doing it for copy_{to,from}_user() is a less than 1% performance hit, but adding it to the strcpy() and memcpy() families is around a 2% hit. Pre-Meltdown, a regression like that for a security feature would have been out of the question, he said; post-Meltdown, since it is less than 5%, maybe there is a chance to add this checking.

+ +

+Better APIs would help as well. He pointed to the evolution of +strcpy(), through strncpy() and +strlcpy() (each with their own bounds flaws) to +strscpy(), which seems to be "OK so far". He also mentioned +memcpy() again as a poor API with respect to bounds checking. +
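To make the contrast concrete, here is a rough userspace re-creation of the bounds behavior that distinguishes strscpy(); the function and constant names are mine, and this is a sketch, not the kernel implementation:

    #include <stddef.h>
    #include <string.h>

    #define TRUNCATED (-7)              /* stand-in for the kernel's -E2BIG */

    /* Never writes past dst[count-1], always NUL-terminates, and reports
     * truncation instead of silently leaving an unterminated string the way
     * strncpy() can. */
    long sketch_strscpy(char *dst, const char *src, size_t count)
    {
        size_t len;

        if (count == 0)
            return TRUNCATED;

        len = strnlen(src, count);
        if (len == count) {             /* src does not fit: truncate */
            memcpy(dst, src, count - 1);
            dst[count - 1] = '\0';
            return TRUNCATED;
        }
        memcpy(dst, src, len + 1);      /* fits, including the NUL */
        return (long)len;
    }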

+ +

+Hardware support for bounds checking is available in the application +data integrity (ADI) feature for SPARC and is coming for Arm; it may also be +available for Intel processors at some point. These all use a form of +"memory tagging", where allocations get a tag that is stored in the +high-order byte of the address. An offset from the address can be checked +by the hardware to see if it still falls within the allocated region based +on the tag. +
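A purely conceptual sketch of top-byte tagging (the shift, mask, and helper names are assumptions for illustration, not a real kernel or hardware API):

    #include <stdint.h>

    #define TAG_SHIFT 56                /* tag lives in the unused top byte */

    static inline uintptr_t tag_pointer(uintptr_t addr, uint8_t tag)
    {
        return (addr & ~((uintptr_t)0xff << TAG_SHIFT)) |
               ((uintptr_t)tag << TAG_SHIFT);
    }

    static inline uint8_t pointer_tag(uintptr_t addr)
    {
        return (uint8_t)(addr >> TAG_SHIFT);
    }

    /* Hardware (or an instrumented allocator) compares pointer_tag() of each
     * access against the tag recorded for the allocation and faults on a
     * mismatch, catching out-of-bounds and use-after-free accesses. */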

+ +

+Control-flow integrity (CFI) has become more of an issue lately because much of what attackers had used in the past has been marked as "no execute", so they are turning to existing code "gadgets" already present in the kernel by hijacking indirect function calls. In C, a call can be made through a function pointer of any type; the language just treats the pointer as an address to jump to. Clang has a CFI-sanitize feature that enforces the function prototype to restrict the calls that can be made. It is done at run time and is not perfect, in part because there are lots of functions in the kernel that take one unsigned long parameter and return an unsigned long.
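An artificial example (functions invented) of the kind of mismatched indirect call that CFI checking is meant to reject:

    #include <stdio.h>

    static unsigned long helper(unsigned long x)
    {
        return x + 1;
    }

    int main(void)
    {
        /* Wrong prototype, but plain C happily jumps there anyway; with
         * Clang's -fsanitize=cfi (which needs LTO) the mismatch is caught
         * at run time. */
        int (*fp)(void) = (int (*)(void))helper;

        printf("%d\n", fp());
        return 0;
    }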

+ +

+Attacks on CFI have both a "forward edge", which is what CFI sanitize +tries to handle, and a "backward edge" that comes from manipulating the stack +values, the return address in particular. Clang has two methods available +to prevent the stack manipulation. The first is the "safe stack", which +puts various important items (e.g. "safe" variables, register spills, and +the return address) on a separate stack. Alternatively, the "shadow stack" +feature creates a separate stack just for return addresses. +

+ +

+One problem with these other stacks is that they are still writable, so if an attacker can find them in memory, they can still perform their attacks. Hardware-based protections, like Intel's Control-Flow Enforcement Technology (CET), provide a read-only shadow call stack for return addresses. Another hardware protection is pointer authentication for Arm, which adds a kind of encrypted tag to the return address that can be verified before it is used.

+ +

Status and challenges

+ +

+Cook then went through the current status of handling these different +problems in the kernel. VLAs are almost completely gone, he said, just a +few remain in the crypto subsystem; he hopes those VLAs will be gone by 4.20 (or +whatever the number of the next kernel release turns out to be). Once that +happens, he plans to turn on -Wvla for the kernel build so that +none creep back in. +

+ +

+There has been steady progress on marking fall-through cases in switch statements. Only 745 remain to be handled of the 2311 that existed when this work started; each one requires scrutiny to determine the author's intent. Auto-initialized local variables can be done using compiler plugins, but that is "not quite what we want", he said. More compiler support would be helpful there. For arithmetic overflow, it would be nice to see GCC get support for the unsigned case, but memory allocations now do explicit overflow checking.

+ +

+Bounds checking has seen some "crying about performance hits", so we are +waiting impatiently for hardware support, he said. CFI forward-edge +protection needs link-time optimization +(LTO) support for Clang in the kernel, but it is currently working on +Android. For backward-edge mitigation, the Clang shadow call stack is +working on Android, but we are impatiently waiting for hardware support for +that too. +

+ +

+There are a number of challenges in doing security development for the +kernel, Cook said. There are cultural boundaries due to conservatism +within the kernel community; that requires patiently working and reworking +features in order to get them upstream. There are, of course, technical +challenges because of the complexity of security changes; those kinds of +problems can be solved. There are also resource limitations in terms of +developers, testers, reviewers, and so on. KSPP and the other kernel +security developers are still making that "slow but steady" progress. +

+ +

+Cook's slides +[PDF] are available for interested readers; before long, there should +be a video available of the talk as well. + +

+[I would like to thank LWN's travel sponsor, the Linux Foundation, for +travel assistance to attend the Linux Security Summit in Vancouver.] +

Comments (70 posted) +

+

The second half of the 4.19 merge window

+ +
+ By Jonathan Corbet
August 26, 2018 +
+By the time Linus Torvalds released +4.19-rc1 and closed +the merge window for this development cycle, 12,317 non-merge +changesets had found their way into the mainline; about 4,800 of those +landed after last week's summary was +written. As tends to be the case +late in the merge window, many of those changes were fixes for the bigger +patches that went in early, but there were also a number of new features +added. Some of the more significant changes include: +
+

+ +

Core kernel

+

+

    + +
  • The full set of patches adding control-group awareness to the out-of-memory + killer has not been merged due to ongoing disagreements, + but one piece of it has: there is a new memory.oom.group + control knob that will cause all processes within a control group to + be killed in an out-of-memory situation. +
  • A new set of protections has been added to prevent an attacker from fooling a program into writing to an existing file or FIFO. An open with the O_CREAT flag to a file or FIFO in a world-writable, sticky directory (e.g. /tmp) will fail if the owner of the opening process is not the owner of either the target file or the containing directory. This behavior, disabled by default, is controlled by the new protected_regular and protected_fifos sysctl knobs; a small usage sketch follows this list.
+ +
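As a rough sketch of how the new knobs behave (the /tmp/trap path is invented, and the exact error code is my assumption rather than something stated above):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        /* Enable the protection (needs root); equivalent to running
         * "sysctl fs.protected_regular=1". */
        FILE *f = fopen("/proc/sys/fs/protected_regular", "w");
        if (f) {
            fputs("1\n", f);
            fclose(f);
        }

        /* If /tmp/trap already exists and is owned by another user, this
         * O_CREAT open now fails (typically with EACCES) instead of writing
         * into that user's file. */
        int fd = open("/tmp/trap", O_CREAT | O_WRONLY, 0600);
        if (fd < 0)
            perror("open /tmp/trap");
        else
            close(fd);
        return 0;
    }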

Filesystems and block layer

+

+

    + +
  • The dm-integrity device-mapper target can now use a separate device + for metadata storage. +
  • EROFS, the "enhanced read-only filesystem", has been added to the + staging tree. It is "a lightweight read-only file system with + modern designs (eg. page-sized blocks, inline xattrs/data, etc.) for + scenarios which need high-performance read-only requirements, + eg. firmwares in mobile phone or LIVECDs" +
  • The new "metadata copy-up" feature in overlayfs will avoid copying a + file's contents to the upper layer on a metadata-only change. See this + commit for details. + +
+

+ +

Hardware support

+

+

    + +
  • Graphics: + Qualcomm Adreno A6xx GPUs. + +
  • Industrial I/O: + Spreadtrum SC27xx series PMIC analog-to-digital converters, + Analog Devices AD5758 digital-to-analog converters, + Intersil ISL29501 time-of-flight sensors, + Silicon Labs SI1133 UV index/ambient light sensor chips, and + Bosch Sensortec BME680 sensors. + + +
  • Miscellaneous: + Generic ADC-based resistive touchscreens, + Generic ASIC devices via the Google Gasket + framework, + Analog Devices ADGS1408/ADGS1409 multiplexers, + Actions Semi Owl SoCs DMA controllers, + MEN 16Z069 watchdog timers, + Rohm BU21029 touchscreen controllers, + Cirrus Logic CS47L35, CS47L85, CS47L90, and CS47L91 codecs, + Cougar 500k gaming keyboards, + Qualcomm GENI-based I2C controllers, + Actions Semiconductor Owl I2C controllers, + ChromeOS EC-based USBPD chargers, and + Analog Devices ADP5061 battery chargers. + +
  • USB: + Nuvoton NPCM7XX on-chip EHCI USB controllers, + Broadcom Stingray PCIe PHYs, and + Renesas R-Car generation 3 PCIe PHYs. + +
  • There is also a new subsystem for the abstraction of GNSS (global + navigation satellite systems — GPS, for example) receivers in the + kernel. To date, such devices have been handled with an abundance of + user-space drivers; the hope is to bring some order in this area. + Support for u-blox and SiRFstar receivers has been added as well. + +
+ + +

+

Kernel internal

+

+

    + +
  • The __deprecated marker, used to mark interfaces that should + no longer be used, has been deprecated and removed from the kernel + entirely. Torvalds + said: "They are not useful. They annoy + everybody, and nobody ever does anything about them, because it's + always 'somebody elses problem'. And when people start thinking that + warnings are normal, they stop looking at them, and the real warnings + that mean something go unnoticed." +
  • The minimum version of GCC required by the kernel has been moved up to + 4.6. + +
+

+ +There are a couple of significant changes that failed to get in this time +around, including the XArray data structure. The patches are +thought to be ready, but they had the bad luck to be based on a tree that +failed to be merged for other reasons, so Torvalds didn't +even look at them. That, in turn, blocks another set of patches intended to +enable migration of slab-allocated objects. +

+The other big deferral is the new system-call +API for filesystem mounting. Despite ongoing concerns about what happens when the same +low-level device is mounted multiple times with conflicting options, Al +Viro sent a pull +request to send this work upstream. The ensuing discussion made it +clear that there is still not a consensus in this area, though, so it seems +that this work has to wait for another cycle. +

+Assuming all goes well, the kernel will stabilize over the coming weeks and +the final 4.19 release will happen in mid-October. +

Comments (1 posted) +

+

Measuring (and fixing) I/O-controller throughput loss

+ +
+

August 29, 2018

+

This article was contributed by Paolo Valente

+
+

Many services, from web hosting and video streaming to cloud storage, +need to move data to and from storage. They also often require that each per-client +I/O flow be guaranteed a non-zero amount of bandwidth and a bounded latency. An +expensive way to provide these guarantees is to over-provision +storage resources, keeping each resource underutilized, and thus +have plenty of bandwidth available for the few I/O flows dispatched to +each medium. Alternatively one can use an I/O controller. Linux provides +two mechanisms designed to throttle some I/O streams to allow others to +meet their bandwidth and latency requirements. These mechanisms work, but +they come at a cost: a loss of as much as 80% of total available I/O +bandwidth. I have run some tests to demonstrate this problem; some +upcoming improvements to the bfq I/O +scheduler promise to improve the situation considerably. +

+ +

Throttling does guarantee control, even on drives that happen to be +highly utilized but, as will be seen, it has a hard time +actually ensuring that drives are highly utilized. Even with greedy I/O +flows, throttling +easily ends up utilizing as little as 20% of the available speed of a +flash-based drive. + +Such a speed loss may be particularly problematic with lower-end +storage. On the opposite end, it is also disappointing with +high-end hardware, as the Linux block I/O stack itself has been +redesigned from the ground up to fully utilize the +high speed of modern, fast storage. In +addition, throttling fails to guarantee the expected bandwidths if I/O +contains both reads and writes, or is sporadic in nature. + +

On the bright side, there now seems to be an effective alternative for +controlling I/O: the proportional-share policy provided by the bfq I/O +scheduler. It enables nearly 100% storage bandwidth utilization, +at least with some of the workloads that are problematic for +throttling. An upcoming version of bfq may be able to +achieve this result with almost all workloads. Finally, bfq +guarantees bandwidths with all workloads. The current limitation of +bfq is that its execution overhead becomes significant at speeds above +400,000 I/O operations per second on commodity CPUs. + +

Using the bfq I/O scheduler, Linux can now guarantee +low latency to lightweight flows containing sporadic, short I/O. No +throughput issues arise, and no configuration is required. This +capability benefits important, time-sensitive tasks, such as +video or audio streaming, as well as executing commands or starting +applications. + +Although benchmarks are not available yet, these guarantees might also be +provided by the newly proposed I/O latency +controller. It allows administrators to set target latencies for I/O +requests originating from each group of processes, and favors the +groups with the lowest target latency. + +

The testbed

+ +

I ran the tests with an ext4 filesystem mounted on a PLEXTOR +PX-256M5S SSD, which features a peak rate of ~160MB/s with random I/O, +and of ~500MB/s with sequential I/O. I used blk-mq, in Linux +4.18. The system was equipped with a 2.4GHz Intel Core i7-2760QM +CPU and 1.3GHz DDR3 DRAM. In such a system, a single thread doing +synchronous reads reaches a throughput of 23MB/s. + +

+For the purposes of these tests, each process is considered to be in one of two groups, termed "target" and "interferers". A target is a single-process, I/O-bound group whose I/O is the focus of the measurements. In particular, I measure the I/O throughput enjoyed by this group to get the minimum bandwidth delivered to the group. An interferer is a single-process group whose role is to generate additional I/O that interferes with the I/O of the target. The tested workloads contain one target and multiple interferers.

The single process in each group either reads or writes, through +asynchronous (buffered) operations, to one file — different from the file read +or written by any other process — after invalidating the buffer cache +for the file. I define a reader or writer process as either "random" or +"sequential", depending on whether it reads or writes its file at random +positions or sequentially. +Finally, an interferer is defined as being either "active" or "inactive" +depending on whether it performs I/O during the test. When an +interferer is mentioned, it is assumed that the interferer is active. + +

Workloads are defined so as to try to cover the combinations that, I +believe, most influence the performance of the storage device and of +the I/O policies. For brevity, in this article I show results for only +two groups of workloads: +

+

    + +
  • Static sequential: four synchronous sequential readers or four + asynchronous sequential writers, plus five inactive interferers. + +
  • Static random: four synchronous random readers, all with a block + size equal to 4k, plus five inactive interferers. +
+ +

To create each workload, I considered, for each mix of +interferers in the group, two possibilities for the target: it could be +either a random or a sequential synchronous reader. + +In a +longer version of this article [PDF], you will also find results +for workloads with varying degrees of I/O randomness, and for +dynamic workloads (containing sporadic I/O sources). These extra results +confirm the losses of throughput and I/O control for throttling that +are shown here. + +

I/O policies

+ +

Linux provides two I/O-control mechanisms for guaranteeing (a minimum) bandwidth, or at least fairness, to long-lived flows: the throttling and proportional-share I/O policies. With throttling, one can set a maximum bandwidth limit — "max limit" for brevity — for the I/O of each group. Max limits can be used, in an indirect way, to provide the service guarantee at the focus of this article. For example, a group can be guaranteed a minimum bandwidth by limiting the maximum bandwidth of all the other groups.

Unfortunately, max limits have two drawbacks in terms of +throughput. First, if some groups do not use their allocated bandwidth, +that bandwidth cannot be reclaimed by other active groups. Second, +limits must comply with the worst-case speed of the device, namely, +its random-I/O peak rate. Such limits will clearly leave a lot of +throughput unused with workloads that otherwise would drive the +device to higher throughput levels. + +Maximizing throughput is simply not a goal of max limits. So, for +brevity, test results with max limits are not shown here. You can +find these results, plus a more detailed description of the above +drawbacks, in the long version of this article. + +

Because of these drawbacks, a new, still experimental, low limit has been added to the throttling policy. If a group is assigned a low limit, then the throttling policy automatically limits the I/O of the other groups in such a way as to guarantee the group a minimum bandwidth equal to its assigned low limit. This new throttling mechanism throttles no group as long as every group is getting at least its assigned minimum bandwidth. I tested this mechanism, but did not consider the interesting problem of guaranteeing minimum bandwidths while, at the same time, enforcing maximum bandwidths.

The other I/O policy available in Linux, proportional share, +provides weighted fairness. Each group is assigned a weight, and should +receive a portion of the total throughput proportional to its weight. +This scheme guarantees minimum bandwidths in the same way that low limits do +in throttling. In particular, it guarantees to each group a minimum +bandwidth equal to the ratio between the weight of the group, and the +sum of the weights of all the groups that may be active at the same +time. + +
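As a made-up numeric illustration of that rule: a group with weight 300, in a system where the weights of all the groups that may be active sum to 1500, is guaranteed at least 300/1500 = 20% of the total throughput, however the other groups behave.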

The actual implementation of the proportional-share policy, on a given +drive, depends on what flavor of the block layer is in use for that +drive. If the drive is using the legacy block interface, the policy is +implemented by +the cfq I/O scheduler. Unfortunately, cfq fails to control +bandwidths with flash-based storage, especially on drives featuring +command queueing. This case is not considered in these tests. With +drives using the multiqueue interface, +proportional share is implemented by bfq. This is the +combination considered in the tests. + +

To benchmark both throttling (low limits) and proportional share, I +tested, for each workload, the combinations of I/O policies and I/O +schedulers reported in the table below. In the end, there are three test +cases for each workload. In addition, for some workloads, I considered two +versions of bfq for the proportional-share policy. + +

Name | I/O policy | Scheduler | Parameter for target | Parameter for each of the four active interferers | Parameter for each of the five inactive interferers | Sum of parameters
low-none | Throttling with low limits | none | 10MB/s | 10MB/s (tot: 40) | 20MB/s (tot: 100) | 150MB/s
prop-bfq | Proportional share | bfq | 300 | 100 (tot: 400) | 200 (tot: 1000) | 1700

For low limits, I report results with only none as the I/O scheduler, +because the results are the same with kyber and mq-deadline. + +

The capabilities of the storage medium and of low limits drove the policy +configurations. In particular: + +

    + +
  • The configuration of the target and of the active interferers for +low-none is the one for which low-none provides +its best possible minimum-bandwidth guarantee to the target: 10MB/s, +guaranteed if all interferers are readers. +Results remain the same regardless of the values used for target +latency and idle time; I set them to 100µs and +1000µs, respectively, for every group.
  • + +
  • Low limits for inactive interferers are set to twice the limits for +active interferers, to pose greater difficulties to the +policy.
  • + +
  • I chose weights for prop-bfq so as to guarantee about the same +minimum bandwidth as low-none to the target, in the same +only-reader worst case as for low-none and to preserve, between +the weights of active and inactive interferers, the same ratio as +between the low limits of active and inactive interferers.
  • +
+

Full details on configurations can be found in the long version of this +article. + +

Each workload was run ten times for each policy, plus ten times without +any I/O control, i.e., with none as I/O scheduler and no I/O policy in +use. For each run, I measured the I/O throughput of the target (which +reveals the bandwidth provided to the target), the cumulative I/O +throughput of the interferers, and the total I/O throughput. These +quantities fluctuated very little during each run, as well as across +different runs. Thus in the graphs I report only averages over per-run +average throughputs. In particular, for the case of no I/O control, I +report only the total I/O throughput, to give an idea of the throughput +that can be reached without imposing any control. + +

Results

+ +

+This plot shows throughput results for the simplest group of +workloads: the static-sequential set. + +

+[Figure 1] +
+

+ +With a random reader as +the target against sequential readers as interferers, low-none does +guarantee the configured low limit to the target. Yet it reaches only a +low total throughput. The throughput of +the random reader evidently oscillates around 10MB/s during the test. +This implies that it is at least slightly below 10MB/s for a significant +percentage of the time. But when this happens, the low-limit mechanism +limits the maximum bandwidth of every active group to the low limit set +for the group, i.e., to just 10MB/s. +The end result is a total throughput lower than 10% of the throughput +reached without I/O control. +

+That said, the high throughput achieved without I/O control is +obtained by choking the random I/O of the target in favor of +the sequential I/O of the interferers. Thus, it +is probably more interesting to compare low-none throughput with the +throughput reachable while actually guaranteeing 10MB/s to the target. +The target is a single, synchronous, random reader, which reaches 23MB/s while +active. So, to guarantee 10MB/s to the target, it is enough to +serve it for about half of the time, and the interferers for the other +half. Since the device reaches ~500MB/s with the sequential I/O of the +interferers, the resulting throughput with this service scheme would be +(500+23)/2, or about 260MB/s. low-none thus reaches less than 20% +of the +total throughput that could be reached while still preserving the target +bandwidth. + +

prop-bfq provides the target with a slightly higher throughput than +low-none. This makes it harder for prop-bfq to reach a high total +throughput, because prop-bfq serves more random I/O (from the target) +than low-none. Nevertheless, prop-bfq gets a much higher total +throughput than low-none. According to the above estimate, this +throughput is about 90% of the maximum throughput that could be reached, +for this workload, without violating service guarantees. The reason for +this good result is that bfq provides an effective implementation of +the proportional-share service policy. At any time, each active group is +granted a fraction of the current total throughput, and the sum of these +fractions is equal to one; so group bandwidths naturally saturate the +available total throughput at all times. + +

Things change with the second workload: a random reader against +sequential writers. Now low-none reaches a much higher total +throughput than prop-bfq. low-none serves +much more sequential (write) I/O than prop-bfq because writes somehow +break the low-limit mechanisms and prevail over the reads of the target. +Conceivably, this happens because writes tend to both starve reads in +the OS (mainly by eating all available I/O tags) and to cheat on their +completion time in the drive. In contrast, bfq is intentionally +configured to privilege reads, to counter these issues. + +

In particular, low-none gets an even higher throughput than no +I/O control at all because it penalizes the random I/O of the target even more +than the no-controller configuration. + +

Finally, with the last two workloads, prop-bfq reaches even +higher total throughput than with the first two. It happens +because the target also does sequential I/O, and serving sequential +I/O is much more beneficial for throughput than serving random I/O. With +these two workloads, the total throughput is, respectively, close to or +much higher than that reached without I/O control. For the last +workload, the total throughput is much higher because, differently from +none, bfq privileges reads over asynchronous writes, and reads yield +a higher throughput than writes. In contrast, low-none still gets +lower or much lower throughput than prop-bfq, because of the same +issues that hinder low-none throughput with the first two workloads. + +

As for bandwidth guarantees, with readers as interferers (third +workload), prop-bfq, as expected, gives the target a fraction of the +total throughput proportional to its weight. bfq approximates +perfect proportional-share bandwidth distribution among groups doing I/O +of the same type (reads or writes) and with the same locality +(sequential or random). With the last workload, prop-bfq gives much +more throughput to the reader than to all the interferers, because +interferers are asynchronous writers, and bfq privileges reads. + +

The second group of workloads (static random) is the one, among all the workloads considered, for which prop-bfq performs worst. Results are shown below:

+

+[Figure 2] +
+

+ +This chart +reports results not only for mainline bfq, but also for an +improved version of +bfq which is currently under public testing. +As can be seen, with only random readers, prop-bfq reaches a +much lower total throughput than low-none. This happens because of +the Achilles heel of the bfq I/O scheduler. If the process in service +does synchronous I/O and has a higher weight than some other process, then, to +give strong bandwidth guarantees to that process, bfq plugs I/O +dispatching every time the process temporarily stops issuing +I/O requests. In this respect, processes actually have differentiated +weights and do synchronous I/O in the workloads tested. So bfq +systematically performs I/O plugging for them. Unfortunately, this +plugging empties the internal queues of the drive, which kills +throughput with random I/O. And the I/O of all processes in these +workloads is also random. + +

The situation reverses with a sequential reader as target. Yet, the most +interesting results come from the new version of bfq, containing +small changes to counter exactly the above weakness. This +version recovers most of the throughput loss with the workload made of +only random I/O and more; with the second workload, where the target is +a sequential reader, it reaches about 3.7 times the total throughput of +low-none. +

+When the main concern is the latency of flows containing short I/O, Linux now performs rather well, thanks to the bfq I/O scheduler and the I/O latency controller. But if the requirement is to provide explicit bandwidth guarantees (or just fairness) to I/O flows, then one must be ready to give up much or most of the speed of the storage media. bfq helps with some workloads, but loses most of the throughput with workloads consisting mostly of random I/O. Fortunately, there is hope for much better performance: an improvement, still under development, seems to enable bfq to reach high throughput with all of the workloads tested so far.

+[ I wish to thank Vivek Goyal for enabling me to make this article +much more fair and sound.]

+
+ +

Comments (4 posted) +

+

KDE's onboarding initiative, one year later

+ +
+

August 24, 2018

+

This article was contributed by Marta Rybczyńska

+
+Akademy +
+

In 2017, the KDE community decided on three goals to concentrate on for the next few years. One of them was streamlining the onboarding of new contributors (the others were improving usability and privacy). During Akademy, the yearly KDE conference that was held in Vienna in August, Neofytos Kolokotronis shared the status of the onboarding goal, the work done during the last year, and further plans. While it is a complicated process in a project as big and diverse as KDE, numerous improvements have already been made.

+ +

Two of the three KDE community goals were proposed by relative +newcomers. Kolokotronis was one of those, having joined the KDE Promo team +not long before proposing +the focus on onboarding. He had previously been involved with Chakra +Linux, a distribution based on KDE software. The fact that new +members of the community proposed strategic goals was also noted in the Sunday keynote +by Claudia Garad.

+ +

Proper onboarding adds excitement to the contribution process and +increases retention, he explained. When we look at the definition of +onboarding, +it is a process in which the new contributors acquire knowledge, skills, and +behaviors so that they can contribute effectively. Kolokotronis proposed +to see it also as socialization: integration into the project's relationships, +culture, structure, and procedures.

+ +

The gains from proper onboarding are many. The project can grow by +attracting new blood with new perspectives and solutions. The community +maintains its health and stays vibrant. Another important advantage of +efficient onboarding is that replacing current contributors becomes easier +when they change interests, jobs, or leave the project for whatever reason. +Finally, successful onboarding adds new advocates to the project.

+ +

Achievements so far and future plans

+ +

The team started with ideas for a centralized onboarding process for the +whole of KDE. They found out quickly that this would not work because KDE +is "very decentralized", so it is hard to provide tools and +procedures that are going to work for the whole project. According to +Kolokotronis, other characteristics of KDE that impact onboarding are high +diversity, remote and online teams, and hundreds of contributors in dozens of +projects and teams. In addition, new contributors already know in which +area they want to take part and they prefer specific information that will +be directly useful for them.

+ +

So the team changed its approach; several changes have since been proposed and implemented. The Get Involved page, which is expected to be one of the resources new contributors read first, has been rewritten. For the Junior Jobs page, the team is discussing what the generic content for KDE as a whole should be. The team simplified Phabricator registration, which resulted in documenting the process better. Another part of the work includes the KDE Bugzilla; it includes, for example, initiatives to limit the number of states of a ticket or remove obsolete products.

[Neofytos Kolokotronis]

+ +

The Plasma Mobile +team is heavily involved in the onboarding goal. The Plasma Mobile +developers have simplified their +development environment setup and created an interactive "Get +Involved" page. In addition, the Plasma team changed the way task +descriptions are written; they now contain more detail, so that it is +easier to get +involved. The basic description should be short and clear, and it should include +details of the problem and possible solutions. The developers try to +share the list of skills necessary to fulfill the tasks and include clear +links to the technical resources needed.

+ +

Kolokotronis and team also identified a new potential source of +contributors for KDE: distributions using +KDE. They have the advantage of already knowing and using the software. + +The next idea the team is working on is to make sure that setting up a +development environment is easy. The team plans to work on this during a +dedicated sprint this autumn.

+ +

Searching for new contributors

+ +

Kolokotronis plans to search for new contributors at the periphery of the project, among the "skilled enthusiasts": loyal users who actually care about the project. They "can make wonders", he said. Those individuals may also be less confident or shy, have trouble taking the first step, and need guidance. The project leaders should take that into account.

+ +

In addition, newcomers are all different. Kolokotronis +provided a long list of how contributors differ, +including skills and knowledge, motives and +interests, and time and dedication. His advice is to "try to find their +superpower", the skills they have that are missing in the team. Those +"superpowers" can then be used for the benefit of the project.

+ +

If a project does nothing else, he said, it can start with its documentation. However, this does not mean only code documentation. Writing down the procedures or information about the internal workings of the project, like who is working on what, is an important part of a project's documentation and helps newcomers. There should also be guidelines on how to start, especially on setting up the development environment.

+ +

The first thing the project leaders should do, according to +Kolokotronis, is to spend time on introducing newcomers to the project. +Ideally every new contributor should be assigned mentors — more +experienced members who can help them when needed. The mentors and project +leaders should find tasks that are interesting for each person. Answering +an audience question on suggestions for shy new +contributors, he recommended even more mentoring. It is also very helpful +to make sure that newcomers have enough to read, but "avoid RTFM", he highlighted. It +is also easy for a new contributor "to fly away", he said. The solution is +to keep requesting things and be proactive.

+ +

What can the project do?

+ +

Kolokotronis suggested a number of actions for a project when it wants to +improve its onboarding. The first step is preparation: the project +leaders should know the team's and the project's needs. Long-term +planning is important, too. It is not enough to wait for contributors to +come — the project should be proactive, which means reaching out to +candidates, suggesting appropriate tasks and, finally, making people +available for the newcomers if they need help.

+ +

This leads to the next step: being a mentor. Kolokotronis suggests being a "great host", but also trying to phase out the dependency on the mentor rapidly. "We have been all newcomers", he said. It can be intimidating to join an existing group. Onboarding creates a sense of belonging which, in turn, increases retention.

+ +

The last step proposed was to be strategic. This includes thinking about the emotions you want newcomers to feel. Kolokotronis explained the strategic part with an example. The overall goal is (surprise!) to improve the onboarding of new contributors. An intermediate objective might be to keep the newcomers after they have made their first commit. If your strategy is to keep them confident and proud, you can use different tactics like praise and public acknowledgment of their work. Another useful tactic may be assigning simple tasks, according to the skill of the contributor.

+ +

To summarize, the most important thing, according to Kolokotronis, is to +respond quickly and spend time with new contributors. This time should be +used to explain procedures, and to introduce the people and culture. It is also +essential to guide first contributions and praise contributor's skill and +effort. +Increase the difficulty of tasks over time to keep contributors motivated and +challenged. And finally, he said, +"turn them into mentors".

+ +

Kolokotronis acknowledges that onboarding "takes time" and "everyone +complains" about it. However, he is convinced that it is beneficial in the +long term +and that it decreases developer turnover.

+ +

Advice to newcomers

+ +

Kolokotronis concluded with some suggestions for newcomers to a +project. They should try +to be persistent and to not get discouraged when something goes wrong. +Building connections from the very beginning is helpful. He suggests +asking questions as if you were already a member "and things will be fine". +However, accept criticism if it happens.

+ +

One of the next actions of the onboarding team will be to collect +feedback from newcomers and experienced contributors to see if they agree +on the ideas and processes introduced so far.

+

Comments (none posted) +

+

Sharing and archiving data sets with Dat

+ +
+

August 27, 2018

+

This article was contributed by Antoine Beaupré

+
+

Dat is a new peer-to-peer protocol +that uses some of the concepts of +BitTorrent and Git. Dat primarily +targets researchers and +open-data activists as it is a great tool for sharing, archiving, and +cataloging large data sets. But it can also be used to implement +decentralized web applications in a novel way.

+ +

Dat quick primer

+ +

Dat is written in JavaScript, so it can be installed with npm, but +there are standalone +binary builds and +a desktop application (as an AppImage). An online viewer can +be used to inspect data for those who do not want to install +arbitrary binaries on their computers.

+ +

The command-line application allows basic operations like downloading existing data sets and sharing your own. Dat uses a 32-byte ed25519 public key, written as a 64-character hex string (as in the example below), which is used to discover and find content on the net. For example, this will download some sample data:

+ +
+    $ dat clone \
+      dat://778f8d955175c92e4ced5e4f5563f69bfec0c86cc6f670352c457943666fe639 \
+      ~/Downloads/dat-demo
+
+ +

Similarly, the share command is used to share content. It indexes +the files in a given directory and creates a new unique address like +the one above. The share +command starts a server that uses multiple discovery mechanisms (currently, the Mainline Distributed +Hash Table (DHT), a custom DNS server, and +multicast DNS) to announce the content to its peers. This is how +another user, armed with that public key, can download that content +with dat clone or mirror the files continuously with +dat sync.

+ +

So far, this looks a lot like BitTorrent magnet links updated +with 21st century cryptography. But Dat adds revisions on top of that, +so modifications are automatically shared through the swarm. That is +important for public data sets as those +are often dynamic in nature. Revisions also make it possible to use +Dat as a backup system by saving the data incrementally using an +archiver.

+ +

While Dat is designed to work on larger data sets, processing them +for sharing may take a while. For example, sharing the Linux +kernel source code required about five minutes as Dat worked on +indexing all of the files. This is comparable to the performance offered by +IPFS and BitTorrent. Data sets with +more or larger files may take quite a bit more time. + +

+One advantage that Dat has over IPFS is that it +doesn't duplicate the data. When IPFS imports new data, it duplicates +the files into ~/.ipfs. For collections of small files like the +kernel, this is not a huge problem, but for larger files like videos or +music, it's a significant limitation. IPFS eventually implemented a +solution to this problem in the form of the experimental +filestore feature, but it's not enabled by default. Even with +that feature enabled, though, changes to data sets are not automatically +tracked. In comparison, Dat operation on dynamic data feels much +lighter. The downside is that each set needs its own dat share +process.

+ +

Like any peer-to-peer system, Dat needs at least one peer to stay online to +offer the content, which is impractical for mobile devices. Hosting +providers like Hashbase (which is a pinning service in Dat +jargon) can help users keep content online without running their own +server. The closest parallel in the traditional web ecosystem +would probably be content distribution networks (CDN) although pinning +services are not necessarily geographically distributed and a CDN does +not necessarily retain a complete copy of a website.

+ + +[Photo app] + + +

A web browser called Beaker, based on the Electron framework, +can access Dat content natively without going through a pinning +service. Furthermore, Beaker is essential to get any of the Dat +applications working, as they fundamentally rely on dat:// URLs +to do their magic. This means that Dat applications won't work for +most users unless they install that special web browser. There is a +Firefox extension called "dat-fox" for people who don't want +to install yet another browser, but it requires installing a +helper program. The extension will be able to load dat:// URLs +but many applications will still not work. For example, the photo gallery +application completely fails with dat-fox.

+ +

Dat-based applications look promising from a privacy point of view. +Because of its peer-to-peer nature, users regain control over where +their data is stored: either on their own computer, an online server, or +by a trusted third party. But considering the protocol is not well +established in current web browsers, I foresee difficulties in +adoption of that aspect of the Dat ecosystem. Beyond that, it is rather +disappointing that Dat applications cannot run natively in a web +browser given that JavaScript is designed exactly for that.

+ +

Dat privacy

+ +

An advantage Dat has over other peer-to-peer protocols like BitTorrent +is end-to-end encryption. I was originally concerned by the encryption +design when reading the academic +paper [PDF]:

+ +
+

It is up to client programs to make design decisions around which + discovery networks they trust. For example if a Dat client decides + to use the BitTorrent DHT to discover peers, and + they are searching + for a publicly shared Dat key (e.g. a key cited publicly in a + published scientific paper) with known contents, then because of the + privacy design of the BitTorrent DHT it becomes public knowledge + what key that client is searching for.

+
+ +

So in other words, to share a secret file with another user, the +public key is transmitted over a secure side-channel, only to then +leak during the discovery process. Fortunately, the public Dat key +is not directly used during discovery as it is hashed +with BLAKE2B. Still, the security model of Dat assumes the public +key is private, which is a rather counterintuitive concept that might upset +cryptographers and confuse users who are frequently encouraged to type +such strings in address bars and search engines as part of the Dat +experience. There is a security & privacy FAQ +in the Dat +documentation warning about this problem:

+ +
+

One of the key elements of Dat privacy is that the public key is + never used in any discovery network. The public key is hashed, + creating the discovery key. Whenever peers attempt to connect to + each other, they use the discovery key.

+ +

Data is encrypted using the public key, so it is important that this + key stays secure.
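As a rough sketch of that kind of derivation, using libsodium's BLAKE2b; the "hypercore" label and the exact parameter layout are assumptions on my part, not a statement of what Dat actually hashes:

    #include <sodium.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned char public_key[32];   /* the dat:// key, shown as 64 hex chars */
        unsigned char discovery_key[32];
        size_t i;

        if (sodium_init() < 0)
            return 1;
        randombytes_buf(public_key, sizeof public_key);  /* stand-in for a real key */

        /* Keyed BLAKE2b over a fixed label: peers can match on the digest
         * without the public key itself ever being broadcast. */
        crypto_generichash(discovery_key, sizeof discovery_key,
                           (const unsigned char *)"hypercore", 9,
                           public_key, sizeof public_key);

        for (i = 0; i < sizeof discovery_key; i++)
            printf("%02x", discovery_key[i]);
        printf("\n");
        return 0;
    }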

+
+ +

There are other privacy issues outlined in the +document; it states that "Dat faces similar privacy risks as +BitTorrent":

+ +
+

When you download a dataset, your IP address is exposed to the users + sharing that dataset. This may lead to honeypot servers collecting + IP addresses, as we've seen in Bittorrent. However, with dataset + sharing we can create a web of trust model where specific + institutions are trusted as primary sources for datasets, + diminishing the sharing of IP addresses.

+
+ +

A Dat blog post refers to this issue as reader privacy and it +is, indeed, a sensitive issue in peer-to-peer networks. It is how +BitTorrent users are discovered and served scary verbiage from lawyers, +after all. But Dat makes this a little better because, to join a swarm, +you must know what you are looking for already, which means peers who +can look at swarm activity only include users who know the secret +public key. This works well for secret content, but for larger, public +data sets, it is a real problem; it is why the Dat project has avoided +creating a Wikipedia mirror so far.

+ +

I found another privacy issue that is not documented in the security FAQ +during my review of the protocol. As mentioned earlier, +the Dat discovery +protocol routinely + phones home to DNS servers operated by the Dat project. +This implies that the default discovery servers (and an +attacker watching over their traffic) know who is publishing or seeking +content, in essence discovering the "social network" behind Dat. This +discovery mechanism can be disabled in clients, but a similar privacy +issue applies to the DHT as well, although that is distributed so it +doesn't require trust of the Dat project itself.

+ +

Considering those aspects of the protocol, privacy-conscious users +will probably want to use Tor or other anonymization techniques to +work around those concerns.

+ +

The future of Dat

+ +

Dat 2.0 was released in June 2017 with performance improvements and +protocol changes. Dat +Enhancement Proposals (DEPs) guide the project's +future development; most work is currently geared toward +implementing the draft "multi-writer proposal" in +HyperDB. Without +multi-writer support, only the +original publisher of a Dat can modify it. According to Joe Hand, +co-executive-director of Code for Science & Society (CSS) and +Dat core developer, in an IRC chat, "supporting multiwriter is a big requirement for lots +of folks". For example, while Dat might allow Alice to share her +research results with Bob, he cannot modify or contribute back to those +results. The multi-writer extension allows for Alice to assign trust +to Bob so he can have write access to the data. + +

+Unfortunately, the current proposal doesn't solve the "hard problems" of "conflict merges and secure key distribution". The former will be worked out through user interface tweaks, but the latter is a classic problem that security projects typically have trouble finding solutions for; Dat is no exception. How will Alice securely trust Bob? The OpenPGP web of trust? Hexadecimal fingerprints read over the phone? Dat doesn't provide a magic solution to this problem.

+ +

Another thing limiting adoption is that Dat is not packaged in any +distribution that I could find (although I requested it in +Debian) and, considering the speed of change of the JavaScript +ecosystem, this is unlikely to change any time soon. A Rust +implementation of the Dat protocol has started, however, which +might be easier to package than the multitude of Node.js +modules. In terms of mobile device support, there is an experimental +Android web browser with Dat support called Bunsen, which somehow +doesn't run on my phone. Some adventurous users have successfully run Dat +in Termux. I haven't found an app +running on iOS at this +point.

+ +

Even beyond platform support, distributed protocols like Dat have a +tough slope to climb against the virtual monopoly of more centralized +protocols, so it remains to be seen how popular those tools will +be. Hand says Dat is supported by multiple non-profit +organizations. Beyond CSS, Blue Link Labs is working on the +Beaker Browser as a self-funded startup and a grass-roots +organization, Digital Democracy, has contributed to the +project. The Internet Archive has announced a collaboration +between itself, CSS, and the California Digital Library to launch a pilot +project to see "how members of a cooperative, decentralized +network can leverage shared services to ensure data preservation while +reducing storage costs and increasing replication counts". + +

+Hand said +adoption in academia has been "slow but steady" and that the Dat in +the Lab project has helped identify areas that could help +researchers adopt the project. Unfortunately, as is the case with many +free-software projects, he said that "our team is definitely a bit +limited on bandwidth to push for bigger adoption". Hand +said that the project received a grant from Mozilla Open Source +Support to improve its documentation, which will be a big help.

+ +

Ultimately, Dat suffers from a problem common to all peer-to-peer +applications, which is naming. Dat addresses are not exactly +intuitive: humans do not remember strings of 64 hexadecimal characters +well. For this, Dat took a similar approach to IPFS by using +DNS TXT records and /.well-known URL paths to bridge existing, +human-readable names with Dat hashes. So this sacrifices a part of the +decentralized nature of the project in favor of usability.

+ +

I have tested a lot of distributed protocols like Dat in the past and +I am not sure Dat is a clear winner. It certainly has advantages over +IPFS in terms of usability and resource usage, but the lack of +packages on most platforms is a big limit to adoption for most +people. This means it will be difficult to share content with my +friends and family with Dat anytime soon, which would probably be my +primary use case for the project. Until the protocol reaches the wider +adoption that BitTorrent has seen in terms of platform support, I will +probably wait before switching everything over to this +promising project.

+

Comments (11 posted) +

+

+Page editor: Jonathan Corbet
+

Inside this week's LWN.net Weekly Edition

+
    +
  • Briefs: OpenSSH 7.8; 4.19-rc1; Which stable?; Netdev 0x12; Bison 3.1; Quotes; ... +
  • Announcements: Newsletters; events; security updates; kernel patches; ... +
+Next page: + Brief items>>
+ +
+
+
+
+
+
+ +
+
+

+ + Copyright © 2018, Eklektix, Inc.
+ + Comments and public postings are copyrighted by their creators.
+ Linux is a registered trademark of Linus Torvalds
+
+

\ No newline at end of file
diff --git a/test/source/LWN/Articles/763987.html b/test/source/LWN/Articles/763987.html
new file mode 100644
index 0000000..153bfda
--- /dev/null
+ Kernel prepatch 4.19-rc2 [LWN.net]
+
+
+
+
+
+
| +
+ +
| +
+ +
+
+ +
+
+
+

Kernel prepatch 4.19-rc2

+ +
+
+The 4.19-rc2 kernel prepatch is out for +testing. +"As usual, the rc2 release is pretty small. People are taking a +breather after the merge window, and it takes a bit of time for bug +reports to start coming in and get identified."

+ +

+

+

+
+
+
+
+ +
+
+

+ + Copyright © 2018, Eklektix, Inc.
+ + Comments and public postings are copyrighted by their creators.
+ Linux is a registered trademark of Linus Torvalds
+
+

\ No newline at end of file
diff --git a/test/source/LWN/Articles/764046.html b/test/source/LWN/Articles/764046.html
new file mode 100644
index 0000000..0a6c792
--- /dev/null
+ Security updates for Monday [LWN.net]
+
+
+
+
+
+
| +
+ +
| +
+ +
+
+ +
+
+
+

Security updates for Monday

+
+
Dist. | ID | Release | Package | Date
Debian | DLA-1492-1 | LTS | dojo | 2018-09-03
Debian | DLA-1487-1 | LTS | libtirpc | 2018-08-31
Debian | DLA-1488-1 | LTS | mariadb-10.0 | 2018-08-31
Debian | DLA-1490-1 | LTS | php5 | 2018-09-01
Debian | DSA-4283-1 | stable | ruby-json-jwt | 2018-08-31
Debian | DLA-1488-1 | LTS | spice | 2018-08-31
Debian | DLA-1486-1 | LTS | spice | 2018-09-01
Debian | DLA-1489-1 | LTS | spice-gtk | 2018-09-01
Debian | DLA-1491-1 | LTS | tomcat8 | 2018-09-02
Debian | DSA-4282-1 | stable | trafficserver | 2018-08-31
Fedora | FEDORA-2018-33fef25ed1 | F28 | ghc-hakyll | 2018-08-31
Fedora | FEDORA-2018-33fef25ed1 | F28 | ghc-hs-bibutils | 2018-08-31
Fedora | FEDORA-2018-07083800ac | F28 | ghostscript | 2018-09-02
Fedora | FEDORA-2018-77e610115a | F28 | mariadb | 2018-08-31
Fedora | FEDORA-2018-33fef25ed1 | F28 | pandoc-citeproc | 2018-08-31
Fedora | FEDORA-2018-f2b24ce26e | F28 | phpMyAdmin | 2018-08-31
Fedora | FEDORA-2018-915602df63 | F27 | xen | 2018-08-31
Mageia | MGASA-2018-0366 | 6 | java-1.8.0-openjdk | 2018-09-02
Mageia | MGASA-2018-0361 | 6 | libarchive | 2018-08-31
Mageia | MGASA-2018-0367 | 6 | libgd | 2018-09-02
Mageia | MGASA-2018-0356 | 6 | libraw | 2018-08-31
Mageia | MGASA-2018-0364 | 6 | libxcursor | 2018-08-31
Mageia | MGASA-2018-0359 | 5 | mariadb | 2018-08-31
Mageia | MGASA-2018-0355 | 5, 6 | mercurial | 2018-08-31
Mageia | MGASA-2018-0363 | 6 | openssh | 2018-08-31
Mageia | MGASA-2018-0365 | 6 | openssl | 2018-09-02
Mageia | MGASA-2018-0358 | 6 | poppler | 2018-08-31
Mageia | MGASA-2018-0362 | 6 | quazip | 2018-08-31
Mageia | MGASA-2018-0357 | 6 | squirrelmail | 2018-08-31
Mageia | MGASA-2018-0360 | 6 | virtualbox | 2018-08-31
openSUSE | openSUSE-SU-2018:2590-1 | 42.3 | cobbler | 2018-09-03
openSUSE | openSUSE-SU-2018:2592-1 | 15.0 | libressl | 2018-09-03
openSUSE | openSUSE-SU-2018:2587-1 | 42.3 | wireshark | 2018-09-02
openSUSE | openSUSE-SU-2018:2591-1 | 15.0 42.3 | zutils | 2018-09-03
SUSE | SUSE-SU-2018:2576-1 | OS7 | OpenStack | 2018-08-31
SUSE | SUSE-SU-2018:2578-1 | OS7 | couchdb | 2018-08-31
SUSE | SUSE-SU-2018:2574-1 | SLE11 | java-1_7_0-ibm | 2018-08-31
SUSE | SUSE-SU-2018:2583-1 | SLE11 | java-1_7_1-ibm | 2018-08-31
SUSE | SUSE-SU-2018:2584-1 | SLE12 | spice | 2018-08-31
+

+ +

+

+ +

+
+
+
+
+ + +
+
+

+ + Copyright © 2018, Eklektix, Inc.
+ + Comments and public postings are copyrighted by their creators.
+ Linux is a registered trademark of Linus Torvalds
+
+

\ No newline at end of file
diff --git a/test/source/LWN/Articles/764055.html b/test/source/LWN/Articles/764055.html
new file mode 100644
index 0000000..f793db2
--- /dev/null
+ Topics sought for the Kernel and Maintainer Summits [LWN.net]
+
+
+
+
+
+
| +
+ +
| +
+ +
+
+ +
+
+
+

Topics sought for the Kernel and Maintainer Summits

+
+
+The annual Maintainer and Kernel Summits will be held in Vancouver, BC on +November 12 to 15, in conjunction with the Linux Plumbers Conference. +The program committee is looking for topics for both summits; read on for +details on how to submit ideas and, perhaps, get an invitation to the +Maintainer Summit. +


+ + + + + + + + + + + + + +
From: "Theodore Y. Ts'o" <tytso-AT-mit.edu>
To: linux-kernel-AT-vger.kernel.org, linux-fsdevel-AT-vger.kernel.org, linux-mm-AT-kvack.org, netdev-AT-vger.kernel.org, linux-block-AT-vger.kernel.org
Subject: Maintainer / Kernel Summit 2018 planning kick-off
Date: Thu, 30 Aug 2018 17:35:17 -0400
Message-ID: <20180830213517.GA19110@thunk.org>
Archive-link: Article

+

+[ Feel free to forward this to other Linux kernel mailing lists as
+  appropriate -- Ted ]
+
+This year, the Maintainer and Kernel Summit will be in Vancouver,
+B.C., November 12th -- 15th.  The Maintainer's summit will be held on
+Monday, November 12th, in Vancouver, immediately before the Linux
+Plumber's Conference (LPC) November 13th -- 15th.
+
+For the past few years, before 2017, we've scheduled mostly management
+and development process issues on the first day.  We then opened up
+the second day of the Kernel Summit to all attendees of the conference
+with which the Kernel Summit has been colocated, and called it the
+"Open Technical Day".  This is something that just made sense in order
+to assure that all of the necessary people needed to discuss a
+particular technical issue could be in the room.
+
+Starting last year in Prague, we took the next logical step, and split
+the Kernel Summit in two.  The "Maintainer's Summit" is an
+invite-only, half-day event, where the primary focus will be process
+issues of Linux Kernel Development.  It will be limited to 30 invitees
+and a handful of sponsored attendees.  This makes it smaller than the
+first few kernel summits (which were limited to around 50 attendees).
+
+The "Kernel Summit" is now organized as a track which is run in
+parallel with the other tracks at the Linux Plumber's Conference, and
+is open to all registered attendees of Plumbers.  Much as how we
+organized the Kernel Summit "open technical day" in 2016 in Santa Fe,
+the Kernel Summit schedule will be synchronized with the other tracks
+at the Plumber's Conference, and it will be open to all registered
+Plumber's attendees.
+
+Linus has suggested the following ten people as the core of the people
+he would like invited to the Maintainer's Summit, which was calculated
+from statistics from his git tree.
+
+	David Miller
+	Dave Airlie
+	Greg KH
+	Arnd Bergmann
+	Ingo Molnar
+	Mauro Carvalho Chehab
+	Takashi Iwai
+	Thomas Gleixner
+	Andrew Morton
+	Olof Johansson
+
+As we did last year, there will be a mini-program committee that will
+pick enough names to bring the total number to 30 for the
+Maintainer's Summit.  That program committee will consist of Arnd
+Bergmann, Thomas Gleixner, Greg KH, Paul McKenney, and Ted Ts'o.
+
+We will use the rest of the names on the list generated by Linus's script
+as a starting point of people to be considered.  People who suggest
+topics that should be discussed on the Maintainer's summit will also
+be added to the list.  To make topic suggestions for the Maintainer's
+Summit, please send e-mail to the ksummit-discuss list with a subject
+prefix of [MAINTAINERS SUMMIT].
+
+
+The other job of the program committee will be to organize the program
+for the Kernel Summit.  The goal of the Kernel Summit track will be to
+provide a forum to discuss specific technical issues that would be
+easier to resolve in person than over e-mail.  The program committee
+will also consider "information sharing" topics if they are clearly of
+interest to the wider development community (i.e., advanced training
+in topics that would be useful to kernel developers).
+
+To suggest a topic for the Kernel Summit, please tag your e-mail with
+[TECH TOPIC].  As before, please use a separate e-mail for each topic,
+and send the topic suggestions to:
+
+	ksummit-discuss@lists.linuxfoundation.org
+
+People who submit topic suggestions before September 21st, and whose
+suggestions are accepted, will be given free admission to the Linux
+Plumbers Conference.
+
+We will be reserving roughly half the Kernel Summit slots for last-minute
+discussions that will be scheduled during the week of Plumber's, in an
+"unconference style".  This was extremely popular in Santa Fe and in
+Prague, since it allowed ideas that came up in hallway discussions,
+and in Plumber's Miniconference, to be given scheduled, dedicated
+times for that discussion.
+
+
+If you were not subscribed to the kernel-discuss mailing list from
+last year (or if you had removed yourself after the kernel summit),
+you can subscribe to the discuss list using mailman:
+
+   https://lists.linuxfoundation.org/mailman/listinfo/ksummi...
+
+ + + + + + \ No newline at end of file diff --git a/test/source/LWN/Articles/764130.html b/test/source/LWN/Articles/764130.html new file mode 100644 index 0000000..2fbae67 --- /dev/null +++ b/test/source/LWN/Articles/764130.html @@ -0,0 +1,234 @@ + + + Security updates for Tuesday [LWN.net] + + + + + + + + + + + + + + + + + + + + + + + + + +
Security updates for Tuesday

+
+
Dist. | ID | Release | Package | Date
openSUSE | openSUSE-SU-2018:2600-1 | 15.0 | ImageMagick | 2018-09-04
openSUSE | openSUSE-SU-2018:2597-1 | 42.3 | libressl | 2018-09-04
openSUSE | openSUSE-SU-2018:2599-1 | 15.0 | postgresql10 | 2018-09-04
openSUSE | openSUSE-SU-2018:2598-1 | 15.0 | spice | 2018-09-04
openSUSE | openSUSE-SU-2018:2602-1 | 42.3 | spice | 2018-09-04
openSUSE | openSUSE-SU-2018:2601-1 | 42.3 | spice-gtk | 2018-09-04
Red Hat | RHSA-2018:2616-01 | EL7 | RHGS WA | 2018-09-04
Red Hat | RHSA-2018:2608-01 | EL6 | Red Hat Gluster Storage | 2018-09-04
Red Hat | RHSA-2018:2607-01 | EL7 | Red Hat Gluster Storage | 2018-09-04
Red Hat | RHSA-2018:2626-01 | EL7 | Red Hat Virtualization | 2018-09-04
Red Hat | RHSA-2018:2615-01 | EL7 | collectd | 2018-09-04
Red Hat | RHSA-2018:2645-01 | EL6.7 | kernel | 2018-09-04
Red Hat | RHSA-2018:2643-01 | EL7 | rhvm-appliance | 2018-09-04
Red Hat | RHSA-2018:2612-01 | EL6 | samba | 2018-09-04
Red Hat | RHSA-2018:2613-01 | EL7 | samba | 2018-09-04
SUSE | SUSE-SU-2018:2603-1 | OS7 | crowbar, crowbar-core, crowbar-ha, crowbar-openstack, crowbar-ui | 2018-09-04
SUSE | SUSE-SU-2018:2596-1 | SLE12 | kernel | 2018-09-03
SUSE | SUSE-SU-2018:2595-1 | SLE12 | spice | 2018-09-03
SUSE | SUSE-SU-2018:2594-1 | SLE12 | spice-gtk | 2018-09-03
SUSE | SUSE-SU-2018:2593-1 | SLE12 | spice-gtk | 2018-09-03
+ + + + + + \ No newline at end of file diff --git a/test/source/LWN/Articles/764182.html b/test/source/LWN/Articles/764182.html new file mode 100644 index 0000000..aaf1c1b --- /dev/null +++ b/test/source/LWN/Articles/764182.html @@ -0,0 +1,164 @@ + + + Security updates for Wednesday [LWN.net] + + + + + + + + + + + + + + + + + + + + + + + + + +
Security updates for Wednesday

+
+
Dist. | ID | Release | Package | Date
Debian | DSA-4284-1 | stable | lcms2 | 2018-09-04
openSUSE | openSUSE-SU-2018:2623-1 | 42.3 | yubico-piv-tool | 2018-09-05
Oracle | ELSA-2018-4208 | OL6 | kernel | 2018-09-04
Oracle | ELSA-2018-4208 | OL7 | kernel | 2018-09-04
SUSE | SUSE-SU-2018:2608-1 | | cobbler | 2018-09-04
SUSE | SUSE-SU-2018:2615-1 | SLE11 | kvm | 2018-09-05
+ + + + + + \ No newline at end of file diff --git a/test/source/LWN/Articles/764184.html b/test/source/LWN/Articles/764184.html new file mode 100644 index 0000000..a81daf3 --- /dev/null +++ b/test/source/LWN/Articles/764184.html @@ -0,0 +1,135 @@ + + + A set of stable kernels [LWN.net] + + + + + + + + + + + + + + + + + + + + + + + + + + +

A set of stable kernels

+ +
+
+Greg Kroah-Hartman has released stable kernels 4.18.6, 4.14.68, 4.9.125, 4.4.154, and 3.18.121. They all contain important fixes and +users should upgrade.
+ + + + + + \ No newline at end of file diff --git a/test/source/LWN/Articles/764202.html b/test/source/LWN/Articles/764202.html new file mode 100644 index 0000000..141a5a8 --- /dev/null +++ b/test/source/LWN/Articles/764202.html @@ -0,0 +1,142 @@ + + + Firefox 62.0 released [LWN.net] + + + + + + + + + + + + + + + + + + + + + + + + + + +

Firefox 62.0 released

+ +
+
+Mozilla has released Firefox 62.0, with several new features. The Firefox +Home (default New Tab) allows users to display up to 4 rows of top sites, +Pocket stories, and highlights; for those using containers there is a menu +option to reopen a tab in a different container; Firefox 63 will remove all +trust for Symantec-issued certificates, and it is optional in Firefox +62; FreeBSD support for WebAuthn was added; and more. See the release +notes for details.
+ + + + + + \ No newline at end of file diff --git a/test/source/LWN/Articles/764219.html b/test/source/LWN/Articles/764219.html new file mode 100644 index 0000000..346deb3 --- /dev/null +++ b/test/source/LWN/Articles/764219.html @@ -0,0 +1,217 @@ + + + GNOME 3.30 released [LWN.net] + + + + + + + + + + + + + + + + + + + + + + + + + + +

GNOME 3.30 released

+
+
+The GNOME Project has announced the release of GNOME 3.30 +"Almería". "This release brings automatic updates in Software, more +games, and a new Podcasts application. Improvements to core GNOME +applications include a refined location and search bar in Files, a +[Thunderbolt] panel in Settings, support for remoting using RDP in Boxes, and +many more." The release notes +contain more information. +


From: Matthias Clasen via devel-announce-list <devel-announce-list-AT-gnome.org>
To: gnome-announce-list-AT-gnome.org, devel-announce-list-AT-gnome.org
Subject: GNOME 3.30 released
Date: Wed, 5 Sep 2018 16:41:54 -0400
Message-ID: <CAFwd_vCdnMhopZsZMq2M-N7DfQbUheTCfDb--Lgn6rrAXPyfdQ@mail.gmail.com>
Cc: Matthias Clasen <matthias.clasen-AT-gmail.com>
Archive-link: Article

+

+The GNOME Project is proud to announce the release of GNOME 3.30, “Almería”
+
+This release brings automatic updates in Software, more games, and a new
+Podcasts application.
+
+Improvements to core GNOME applications include a refined location and
+search
+bar in Files, a Thunderbolt panel in Settings, support for remoting using
+RDP
+in Boxes, and many more.
+
+More information about the changes in GNOME 3.30 can be found in the
+release notes:
+
+ https://help.gnome.org/misc/release-notes/3.30/
+
+For the release team, this release is particularly exciting because it is
+the
+first one that has been produced and verified with our new CI infrastructure
+in gitlab.gnome.org.
+
+GNOME 3.30 will be available shortly in many distributions. If you want to
+try it
+today, you can use the soon-to-be-released Fedora 29 or the openSUSE nightly
+live images which will both include GNOME 3.30 very soon.
+
+ https://www.gnome.org/getting-gnome/
+
+http://download.opensuse.org/repositories/GNOME:/Medias/i...
+
+To try the very latest developments in GNOME, you can also use Fedora
+Silverblue,
+whose rawhide branch always includes the latest GNOME packages.
+
+
+https://kojipkgs.fedoraproject.org/compose/rawhide/latest...
+
+If you are interested in building applications for GNOME 3.30, look for the
+GNOME 3.30 Flatpak SDK, which will be available in the sdk.gnome.org
+repository
+soon.
+
+This six-month effort wouldn't have been possible without the whole
+GNOME community, made of contributors and friends from all around the
+world: developers, designers, documentation writers, usability and
+accessibility specialists, translators, maintainers, students, system
+administrators, companies, artists, testers and last, but not least, our
+users.
+GNOME would not exist without all of you. Thank you to everyone!
+
+Our next release, GNOME 3.32, is planned for March 2019. Until then,
+enjoy GNOME 3.30!
+
+The GNOME Release Team
+-- 
+devel-announce-list mailing list
+devel-announce-list@gnome.org
+https://mail.gnome.org/mailman/listinfo/devel-announce-list
+
+ + + + + + \ No newline at end of file diff --git a/test/source/LWN/Articles/764300.html b/test/source/LWN/Articles/764300.html new file mode 100644 index 0000000..e1d7bcc --- /dev/null +++ b/test/source/LWN/Articles/764300.html @@ -0,0 +1,259 @@ + + + Security updates for Thursday [LWN.net] + + + + + + + + + + + + + + + + + + + + + + + + + +

Security updates for Thursday

+
+
Dist. | ID | Release | Package | Date
Debian | DSA-4286-1 | stable | curl | 2018-09-05
Debian | DLA-1494-1 | LTS | gdm3 | 2018-09-05
Debian | DLA-1495-1 | LTS | git-annex | 2018-09-05
Debian | DLA-1496-1 | LTS | lcms2 | 2018-09-06
Debian | DSA-4285-1 | stable | sympa | 2018-09-05
Fedora | FEDORA-2018-38bdbafa96 | F28 | discount | 2018-09-06
Fedora | FEDORA-2018-fe437a98d6 | F27 | dolphin-emu | 2018-09-06
Fedora | FEDORA-2018-5bf744beee | F28 | gd | 2018-09-06
Fedora | FEDORA-2018-fac5420dd1 | F27 | obs-build | 2018-09-06
Fedora | FEDORA-2018-fac5420dd1 | F27 | osc | 2018-09-06
Fedora | FEDORA-2018-4f0b7d1251 | F27 | tcpflow | 2018-09-06
Fedora | FEDORA-2018-5ad77cc979 | F28 | tcpflow | 2018-09-06
Fedora | FEDORA-2018-7626df1731 | F27 | yara | 2018-09-06
Fedora | FEDORA-2018-8344cb89ac | F28 | yara | 2018-09-06
openSUSE | openSUSE-SU-2018:2628-1 | 15.0 | wireshark | 2018-09-05
Slackware | SSA:2018-249-01 | | curl | 2018-09-06
Slackware | SSA:2018-249-03 | | firefox | 2018-09-06
Slackware | SSA:2018-249-02 | | ghostscript | 2018-09-06
Slackware | SSA:2018-249-04 | | thunderbird | 2018-09-06
SUSE | SUSE-SU-2018:2630-1 | SLE15 | apache-pdfbox | 2018-09-06
SUSE | SUSE-SU-2018:2629-1 | | curl | 2018-09-05
SUSE | SUSE-SU-2018:2632-1 | OS7 SLE12 | dovecot22 | 2018-09-06
SUSE | SUSE-SU-2018:2631-1 | OS7 SLE12 | libvirt | 2018-09-06
Ubuntu | USN-3759-2 | 12.04 | libtirpc | 2018-09-05
Ubuntu | USN-3759-1 | 14.04 16.04 18.04 | libtirpc | 2018-09-05
+ + + + + + \ No newline at end of file diff --git a/test/source/LWN/Articles/764321.html b/test/source/LWN/Articles/764321.html new file mode 100644 index 0000000..7d51dae --- /dev/null +++ b/test/source/LWN/Articles/764321.html @@ -0,0 +1,216 @@ + + + The Hidden Benefit of Giving Back to Open Source Software (Working Knowledge) [LWN.net] + + + + + + + + + + + + + + + + + + + + + + + + + + +

The Hidden Benefit of Giving Back to Open Source Software (Working Knowledge)

+ +
+
+The Harvard Business School's "Working Knowledge" site has an +article arguing that it can pay for companies to allow their developers +to contribute back to the projects whose software they use. +"And that presents an interesting dilemma for firms that rely heavily +on open source. Should they allow employees on company time to make updates +and edits to the software for community use that could be used by +competitors? New research by Assistant Professor Frank Nagle, a member of +the Strategy Unit at Harvard Business School, shows that paying employees +to contribute to such software boosts the company’s productivity from using +the software by as much as 100 percent, when compared with free-riding +competitors."
The Hidden Benefit of Giving Back to Open Source Software (Working Knowledge)

+
+

Posted Sep 6, 2018 18:43 UTC (Thu) by sjfriedl (subscriber, #10111) + [Link] +

+
+This is no surprise to me. Most of the open source software improvements that might help a competitor are too general in nature to really be giving the other guys a competitive advantage.
+

+For instance, if Lyft contributed Linux kernel or PHP or Apache or whatever fixes, the benefit to Lyft of having that improved expertise far exceeds the general benefit to competitor Uber.
+

+ +
+

+

+
+ +
+
+ +
+ + +
+

The Hidden Benefit of Giving Back to Open Source Software (Working Knowledge)

+
+

Posted Sep 6, 2018 21:42 UTC (Thu) by k8to (subscriber, #15413) + [Link] +

+
+Even having to debate it seems so farcical. If you're worried about people who "do the same thing", the software they use is not the main differentiator. How your company is organized, how you treat your people and your customers, how you organized projects etc are huge, and software is ultimately minor. Fixes and changes to software? Incredibly minor.

The Hidden Benefit of Giving Back to Open Source Software (Working Knowledge)

+
+

Posted Sep 6, 2018 21:56 UTC (Thu) by k8to (subscriber, #15413) + [Link] +

+
+Perhaps this is too dismissive, as there is the part about letting your programmers do their job to the best of their ability. That seems pretty big.
+ + + + + + \ No newline at end of file diff --git a/test/source/LWN/index.html b/test/source/LWN/index.html new file mode 100644 index 0000000..bf54316 --- /dev/null +++ b/test/source/LWN/index.html @@ -0,0 +1,502 @@ + + + Welcome to LWN.net [LWN.net] + + + + + + + + + + + + + + + + + + + + + +

Welcome to LWN.net

+
+
+
+
+ LWN.net is a reader-supported news site dedicated to producing the best + coverage from within the Linux and free software development communities. + See the LWN FAQ for more information, and + please consider subscribing to gain full + access and support our activities. +
+

+

+
[$] Writing network flow dissectors in BPF
+ +
+ [Kernel] Posted Sep 6, 2018 15:59 UTC (Thu) by corbet +

+ Network packet headers contain a great deal of information, but the +kernel often only needs a subset of that information to be able to perform +filtering or associate any given packet with a flow. The piece of code that +follows the different layers of packet encapsulation to find the important +data is called a flow dissector. In current Linux kernels, the flow +dissector +is written in C. A patch set has been +proposed recently to implement it in BPF with the clear goal of improving +security, flexibility, and maybe even performance. + +
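As a purely conceptual aside (not the kernel's flow-dissector code and not the proposed BPF interface), the job a dissector does can be sketched in a few lines of ordinary userspace C: walk past the Ethernet header, read the IPv4 header length, and pull out the protocol, addresses, and ports. The struct and function names below are hypothetical.

/* Conceptual sketch only: extract the usual flow keys from a raw
 * Ethernet + IPv4 + TCP/UDP packet buffer. */
#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <arpa/inet.h>

struct flow_keys {
    uint32_t saddr, daddr;     /* IPv4 source/destination addresses */
    uint16_t sport, dport;     /* transport-layer ports */
    uint8_t  ip_proto;         /* TCP, UDP, ... */
};

/* Returns 0 on success, -1 if the packet is too short or not IPv4. */
static int dissect(const uint8_t *pkt, size_t len, struct flow_keys *keys)
{
    uint16_t ethertype;

    if (len < 14 + 20)
        return -1;
    memcpy(&ethertype, pkt + 12, 2);
    if (ntohs(ethertype) != 0x0800)        /* not IPv4 */
        return -1;

    const uint8_t *ip = pkt + 14;          /* skip the Ethernet header */
    size_t ihl = (ip[0] & 0x0f) * 4;       /* IPv4 header length in bytes */
    if (len < 14 + ihl + 4)
        return -1;

    keys->ip_proto = ip[9];
    memcpy(&keys->saddr, ip + 12, 4);
    memcpy(&keys->daddr, ip + 16, 4);

    const uint8_t *l4 = ip + ihl;          /* ports sit at the start of L4 */
    memcpy(&keys->sport, l4, 2);
    memcpy(&keys->dport, l4 + 2, 2);
    keys->sport = ntohs(keys->sport);
    keys->dport = ntohs(keys->dport);
    return 0;
}

A real dissector has to handle VLAN tags, IPv6, fragments, and tunnels, which is exactly the part that benefits from being expressed in a verifiable BPF program rather than ad-hoc C.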

+ Full Story (comments: 1) +

+

+
+
The Hidden Benefit of Giving Back to Open Source Software (Working Knowledge)
+ +
+ [Briefs] Posted Sep 6, 2018 16:56 UTC (Thu) by corbet +

+ The Harvard Business School's "Working Knowledge" site has an +article arguing that it can pay for companies to allow their developers +to contribute back to the projects whose software they use. +"And that presents an interesting dilemma for firms that rely heavily +on open source. Should they allow employees on company time to make updates +and edits to the software for community use that could be used by +competitors? New research by Assistant Professor Frank Nagle, a member of +the Strategy Unit at Harvard Business School, shows that paying employees +to contribute to such software boosts the company’s productivity from using +the software by as much as 100 percent, when compared with free-riding +competitors." +

+ Comments (3 posted) +

+

+
+
+
[$] LWN.net Weekly Edition for September 6, 2018
+ + Posted Sep 6, 2018 3:03 UTC (Thu)

+ The LWN.net Weekly Edition for September 6, 2018 is available. +

+ Inside this week's LWN.net Weekly Edition +

    +
  • Front: Life behind the tinfoil curtain; User-space Spectre protection; fs-verity; IDA; Julia part 2; GopherCon. +
  • Briefs: Tink; Kernel & Maintainer Summit topics; LMDE 3; Firefox 62; GNOME 3.30; Quotes; ... +
  • Announcements: Newsletters; events; security updates; kernel patches; ... +
+Read more +

+ +

+
Security updates for Thursday
+ +
+ [Security] Posted Sep 6, 2018 13:55 UTC (Thu) by jake +

+ Security updates have been issued by Debian (curl, gdm3, git-annex, lcms2, and sympa), Fedora (discount, dolphin-emu, gd, obs-build, osc, tcpflow, and yara), openSUSE (wireshark), Slackware (curl, firefox, ghostscript, and thunderbird), SUSE (apache-pdfbox, curl, dovecot22, and libvirt), and Ubuntu (libtirpc). + +

+ Full Story (comments: none) +

+

+
+
+
[$] Life behind the tinfoil curtain
+ +
+ [Security] Posted Sep 5, 2018 22:11 UTC (Wed) by jake +

+

+Security and convenience rarely go hand-in-hand, but if your job (or life) +requires extraordinary care against potentially targeted attacks, the +security side of that tradeoff may win out. If so, running a system like +Qubes OS on your desktop or CopperheadOS on your phone might make sense, +which is just what Konstantin Ryabitsev, Linux Foundation (LF) director of IT +security, has done. He reported on the experience in a talk +[YouTube video] entitled "Life Behind the Tinfoil Curtain" at the 2018 +Linux +Security Summit North America. + +

+ Full Story (comments: 9) +

+

+
+
GNOME 3.30 released
+ +
+ [Development] Posted Sep 5, 2018 21:17 UTC (Wed) by ris +

+ The GNOME Project has announced the release of GNOME 3.30 +"Almería". "This release brings automatic updates in Software, more +games, and a new Podcasts application. Improvements to core GNOME +applications include a refined location and search bar in Files, a +[Thunderbolt] panel in Settings, support for remoting using RDP in Boxes, and +many more." The release notes +contain more information. + +

+ Full Story (comments: none) +

+

+
+
+
[$] Strengthening user-space Spectre v2 protection
+ +
+ [Kernel] Posted Sep 5, 2018 21:47 UTC (Wed) by corbet +

+ The Spectre variant 2 vulnerability allows the speculative execution of +incorrect (in an attacker-controllable way) indirect branch predictions, +resulting in +the ability to exfiltrate information via side channels. The kernel has +been reasonably well protected against this variant since shortly after its +disclosure in January. It is, however, possible for user-space processes +to use Spectre v2 to attack each other; thus far, the mainline kernel has +offered relatively little protection against such attacks. A recent proposal +from Jiri Kosina may change that situation, but there are still some +disagreements around the details. + +
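To give a feel for the kind of per-task opt-in under discussion, the kernel already exposes a speculation-control prctl(); the sketch below reuses that interface. The PR_SPEC_INDIRECT_BRANCH selector is an assumption on my part, not something taken from the article or from the proposal itself, whose interface was still being debated.

/* Hedged sketch: a task asking for indirect-branch speculation to be
 * disabled for itself.  Constant availability depends on kernel headers. */
#include <stdio.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

int main(void)
{
    /* Request the mitigation for this task only. */
    if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH,
              PR_SPEC_DISABLE, 0, 0) != 0)
        perror("PR_SET_SPECULATION_CTRL");

    /* Query the current state; the result encodes PR_SPEC_* flags. */
    printf("speculation state: %d\n",
           prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH, 0, 0, 0));
    return 0;
}

The trade-off such an interface exposes is that only the tasks that opt in pay the performance cost of the mitigation.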

+ Full Story (comments: 1) +

+

+
+
Firefox 62.0 released
+ +
+ [Development] Posted Sep 5, 2018 17:31 UTC (Wed) by ris +

+ Mozilla has released Firefox 62.0, with several new features. The Firefox +Home (default New Tab) allows users to display up to 4 rows of top sites, +Pocket stories, and highlights; for those using containers there is a menu +option to reopen a tab in a different container; Firefox 63 will remove all +trust for Symantec-issued certificates, and it is optional in Firefox +62; FreeBSD support for WebAuthn was added; and more. See the release +notes for details.

+ Comments (none posted) +

+

+
+
+
[$] Learning about Go internals at GopherCon
+ +
+ [Front] Posted Sep 5, 2018 19:20 UTC (Wed) by jake +

+

GopherCon is the major +conference for the Go language, attended +by 1600 +dedicated "gophers", as the members of its community like to call +themselves. Held for the last five years in Denver, it attracts programmers, +open-source contributors, and technical managers from all over North +America and the world. GopherCon's highly-technical program is an intense +mix of Go internals and programming tutorials, a few of which we will +explore in this article. +

+Subscribers can read on for a report from GopherCon by guest author Josh +Berkus. + +

+ Full Story (comments: 10) +

+

+
+
A set of stable kernels
+ +
+ [Kernel] Posted Sep 5, 2018 15:15 UTC (Wed) by ris +

+ Greg Kroah-Hartman has released stable kernels 4.18.6, 4.14.68, 4.9.125, 4.4.154, and 3.18.121. They all contain important fixes and +users should upgrade. +

+ Comments (none posted) +

+

+
+
+
[$] An introduction to the Julia language, part 2
+ +
+ [Development] Posted Sep 4, 2018 15:57 UTC (Tue) by jake +

+

Part 1 of this series introduced +the Julia project's goals and +development process, along with +the language syntax, including the basics of control flow, data +types, and, in more detail, how to work with arrays. In this part, +user-defined functions and the central +concept of multiple dispatch are described. It will also survey Julia's +module and +package system, cover some syntax features, show how to make +plots, and briefly dip into macros and distributed computing. + +

+ Full Story (comments: 7) +

+

+
+
Security updates for Wednesday
+ +
+ [Security] Posted Sep 5, 2018 15:01 UTC (Wed) by ris +

+ Security updates have been issued by Debian (lcms2), openSUSE (yubico-piv-tool), Oracle (kernel), and SUSE (cobbler and kvm). + +

+ Full Story (comments: none) +

+

+
+
+
[$] IDA: simplifying the complex task of allocating integers
+ +
+ [Kernel] Posted Sep 4, 2018 0:15 UTC (Tue) by corbet +

+ It is common for kernel code to generate unique integers for identifiers. +When one plugs in a flash drive, it will show up as +/dev/sdN; that N (a letter derived from a +number) must be generated in the +kernel, and it should not already be in use for another drive or unpleasant +things will happen. One might think that generating such numbers would not +be a difficult task, but that turns out not to be the case, especially in +situations where many numbers must be tracked. The IDA (for "ID +allocator", perhaps) API exists to handle this specialized task. In past +kernels, it has managed to make the process of getting an unused number +surprisingly +complex; the 4.19 kernel has a new IDA API that simplifies things +considerably. + +
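To make the "simplifies things" claim concrete, here is a minimal sketch of how a driver might hand out unique drive numbers with the new-style IDA calls. The ida_alloc()/ida_free() names are assumed from the 4.19-era interface, and the surrounding helper names are hypothetical, not quoted from the article.

/* Minimal sketch, assuming the 4.19-style IDA interface. */
#include <linux/idr.h>
#include <linux/gfp.h>

static DEFINE_IDA(drive_index_ida);   /* hypothetical allocator for drive numbers */

static int drive_get_index(void)
{
    /* Returns the lowest unused non-negative ID, or a negative errno. */
    return ida_alloc(&drive_index_ida, GFP_KERNEL);
}

static void drive_put_index(int index)
{
    /* Hand the ID back so a later device can reuse it. */
    ida_free(&drive_index_ida, index);
}

The appeal of the newer interface is that allocation is a single call whose only failure mode is a negative return, with no preload-and-retry dance around it.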

+ Full Story (comments: 8) +

+

+
+
Security updates for Tuesday
+ +
+ [Security] Posted Sep 4, 2018 15:14 UTC (Tue) by ris +

+ Security updates have been issued by openSUSE (ImageMagick, libressl, postgresql10, spice, and spice-gtk), Red Hat (collectd, kernel, Red Hat Gluster Storage, Red Hat Virtualization, RHGS WA, rhvm-appliance, and samba), and SUSE (crowbar, crowbar-core, crowbar-ha, crowbar-openstack, crowbar-ui, kernel, spice, and spice-gtk). + +

+ Full Story (comments: none) +

+

+
+
+
[$] Protecting files with fs-verity
+ +
+ [Kernel] Posted Aug 30, 2018 18:50 UTC (Thu) by corbet +

+ The developers of the Android system have, among their many goals, the wish +to better protect Android devices against persistent compromise. It is bad +if a device is taken over by an attacker; it's worse if it remains +compromised even after a reboot. Numerous mechanisms for ensuring the +integrity of installed system files have been proposed and implemented +over the years. But it seems there is always room for one more; to fill +that space, the fs-verity +mechanism is being proposed as a way to protect individual files from +malicious modification. + +

+ Full Story (comments: 6) +

+

+
+
Topics sought for the Kernel and Maintainer Summits
+ +
+ [Kernel] Posted Sep 3, 2018 19:07 UTC (Mon) by corbet +

+ The annual Maintainer and Kernel Summits will be held in Vancouver, BC on +November 12 to 15, in conjunction with the Linux Plumbers Conference. +The program committee is looking for topics for both summits; read on for +details on how to submit ideas and, perhaps, get an invitation to the +Maintainer Summit. + +

+ Full Story (comments: none) +

+

+
+
+
LWN.net Weekly Edition for August 30, 2018
+ + Posted Aug 30, 2018 1:06 UTC (Thu)

+ The LWN.net Weekly Edition for August 30, 2018 is available. +

+ Inside this week's LWN.net Weekly Edition +

    +
  • Front: Julia; C considered dangerous; 4.19 Merge window; I/O controller throughput; KDE onboarding; Dat. +
  • Briefs: OpenSSH 7.8; 4.19-rc1; Which stable?; Netdev 0x12; Bison 3.1; Quotes; ... +
  • Announcements: Newsletters; events; security updates; kernel patches; ... +
+Read more +

+ +

+
Security updates for Monday
+ +
+ [Security] Posted Sep 3, 2018 15:41 UTC (Mon) by ris +

+ Security updates have been issued by Debian (dojo, libtirpc, mariadb-10.0, php5, ruby-json-jwt, spice, spice-gtk, tomcat8, and trafficserver), Fedora (ghc-hakyll, ghc-hs-bibutils, ghostscript, mariadb, pandoc-citeproc, phpMyAdmin, and xen), Mageia (java-1.8.0-openjdk, libarchive, libgd, libraw, libxcursor, mariadb, mercurial, openssh, openssl, poppler, quazip, squirrelmail, and virtualbox), openSUSE (cobbler, libressl, wireshark, and zutils), and SUSE (couchdb, java-1_7_0-ibm, java-1_7_1-ibm, OpenStack, and spice). + +

+ Full Story (comments: none) +

+

+
+
+
[$] Measuring (and fixing) I/O-controller throughput loss
+ +
+ [Kernel] Posted Aug 29, 2018 21:20 UTC (Wed) by corbet +

+ Many services, from web hosting and video streaming to cloud storage, +need to move data to and from storage. They also often require that each per-client +I/O flow be guaranteed a non-zero amount of bandwidth and a bounded latency. An +expensive way to provide these guarantees is to over-provision +storage resources, keeping each resource underutilized, and thus +have plenty of bandwidth available for the few I/O flows dispatched to +each medium. Alternatively one can use an I/O controller. Linux provides +two mechanisms designed to throttle some I/O streams to allow others to +meet their bandwidth and latency requirements. These mechanisms work, but +they come at a cost: a loss of as much as 80% of total available I/O +bandwidth. I have run some tests to demonstrate this problem; some +upcoming improvements to the bfq I/O +scheduler promise to improve the situation considerably. + +

+ Full Story (comments: 4) +

+

+
+
Kernel prepatch 4.19-rc2
+ +
+ [Kernel] Posted Sep 2, 2018 22:29 UTC (Sun) by corbet +

+ The 4.19-rc2 kernel prepatch is out for +testing. +"As usual, the rc2 release is pretty small. People are taking a +breather after the merge window, and it takes a bit of time for bug +reports to start coming in and get identified." +

+ Comments (none posted) +

+

+
+

--> More news items + +

+
+
+
+
+
+ +
+
+

+ + Copyright © 2018, Eklektix, Inc.
+ + Comments and public postings are copyrighted by their creators.
+ Linux is a registered trademark of Linus Torvalds
+
+

+ + + + + + \ No newline at end of file -- 2.27.0