From ffca225ca7b97732dfaec1668c46d40d6142edd0 Mon Sep 17 00:00:00 2001 From: jhollowe Date: Sun, 31 Dec 2023 16:33:18 +0000 Subject: [PATCH] deploy: 3a0013c86f99f49f05b04e68f8c6f7f47f03ae92 --- archive/index.html | 6 +- archive/index.json | 2 +- archive/page/2/index.html | 2 + categories/101/index.html | 18 ++-- categories/101/index.xml | 2 +- categories/clemson/index.xml | 2 - categories/conference/index.html | 18 ++-- categories/guide/index.html | 18 ++-- categories/index.html | 8 +- categories/index.json | 2 +- categories/index.xml | 2 +- categories/life/index.html | 83 +++++++++++++++++++ categories/life/index.xml | 3 + categories/life/page/1/index.html | 2 + categories/meta/index.html | 18 ++-- categories/networks/index.html | 18 ++-- categories/networks/index.xml | 2 +- categories/{clemson => opinion}/index.html | 26 +++--- categories/opinion/index.xml | 5 ++ .../{clemson => opinion}/page/1/index.html | 4 +- categories/review/index.html | 18 ++-- categories/story-time/index.xml | 1 - categories/story-time/page/1/index.html | 2 - categories/work/index.html | 18 ++-- categories/work/index.xml | 2 +- index.html | 24 +++--- index.json | 2 +- index.xml | 10 ++- page/2/index.html | 22 ++--- .../index.html | 4 +- .../index.html | 4 +- posts/ato22/index.html | 2 +- posts/aws-tg-mtu/index.html | 2 +- posts/clemson-soc-101/index.html | 6 +- posts/framework-first-impressions/index.html | 2 +- posts/framework-followup/index.html | 2 +- posts/index.html | 22 +++-- posts/index.json | 2 +- posts/index.xml | 10 ++- posts/masters-degree-takeaways/index.html | 4 +- .../index.html | 4 +- posts/page/2/index.html | 20 +++-- posts/steamdeck/index.html | 17 ++++ series/framework-laptop/index.html | 18 ++-- series/framework/index.html | 18 ++-- .../index.html | 18 ++-- .../index.xml | 2 +- series/index.json | 2 +- series/raspberry-pi-cluster/index.html | 18 ++-- series/til/index.html | 18 ++-- sitemap.xml | 2 +- tags/active-directory/index.html | 18 ++-- tags/ato/index.html | 18 ++-- tags/aws/index.html | 18 ++-- tags/aws/index.xml | 2 +- tags/backup/index.html | 18 ++-- tags/{opinion => clemson}/index.html | 26 +++--- tags/clemson/index.xml | 4 + tags/clemson/page/1/index.html | 2 + tags/cloud/index.html | 18 ++-- tags/cloud/index.xml | 2 +- tags/cluster/index.html | 18 ++-- tags/containers/index.html | 18 ++-- tags/development/index.html | 18 ++-- .../story-time => tags/gaming}/index.html | 30 ++++--- tags/gaming/index.xml | 3 + tags/gaming/page/1/index.html | 2 + tags/hardware/index.html | 24 +++--- tags/hardware/index.xml | 4 +- tags/index.html | 14 ++-- tags/index.json | 2 +- tags/index.xml | 2 +- tags/life/index.html | 20 +++-- tags/life/index.xml | 6 +- tags/linux/index.html | 18 ++-- tags/memory/index.html | 18 ++-- tags/networks/index.html | 18 ++-- tags/networks/index.xml | 2 +- tags/opinion/index.xml | 5 -- tags/opinion/page/1/index.html | 2 - tags/proxmox/index.html | 18 ++-- tags/ssh/index.html | 18 ++-- tags/sysadmin/index.html | 18 ++-- tags/web/index.html | 18 ++-- tags/web/index.xml | 2 +- tags/zfs/index.html | 18 ++-- 86 files changed, 589 insertions(+), 390 deletions(-) delete mode 100644 categories/clemson/index.xml create mode 100644 categories/life/index.html create mode 100644 categories/life/index.xml create mode 100644 categories/life/page/1/index.html rename categories/{clemson => opinion}/index.html (85%) create mode 100644 categories/opinion/index.xml rename categories/{clemson => opinion}/page/1/index.html (55%) delete mode 100644 categories/story-time/index.xml delete mode 
100644 categories/story-time/page/1/index.html create mode 100644 posts/steamdeck/index.html rename tags/{opinion => clemson}/index.html (89%) create mode 100644 tags/clemson/index.xml create mode 100644 tags/clemson/page/1/index.html rename {categories/story-time => tags/gaming}/index.html (60%) create mode 100644 tags/gaming/index.xml create mode 100644 tags/gaming/page/1/index.html delete mode 100644 tags/opinion/index.xml delete mode 100644 tags/opinion/page/1/index.html diff --git a/archive/index.html b/archive/index.html index 851a972..22363a2 100644 --- a/archive/index.html +++ b/archive/index.html @@ -13,6 +13,8 @@ kimbieabout archive posts

Archive

\ No newline at end of file +Change Username Without Separate Session
\ No newline at end of file diff --git a/archive/index.json b/archive/index.json index f1f2c31..2fd5c5e 100644 --- a/archive/index.json +++ b/archive/index.json @@ -1 +1 @@ -[{"content":" The Tale Begins There I was, triaging a new issue that came in. A Linux VM running in the cloud was hanging when we started trying to run our workload on it. Huh, there was no output at all from the python script; it didn\u0026rsquo;t even create its log file, one of the first things it should do. Logging into the cloud instance, I looked around and noticed there was a python process running for the script we started, so the connection to the host and creating the python process at least worked. Well, since it didn\u0026rsquo;t work the first time, I killed the process and tried running the same command manually to see if there was an issue with the setup of the process. Aaaannnndddd it hung. But it doesn\u0026rsquo;t hang with the exact same NFS mount and AMI (root disk image) in a different cloud account we use.\nWell, this is interesting. Okay, let\u0026rsquo;s just look at the script we are running. Hung. Welp, I guess it is time for the good old turn-it-off-and-on-again fix. Now let\u0026rsquo;s look at the script. That seems fine. Let\u0026rsquo;s look at the python executable binary we are running. Hung. Uh, okay. Let\u0026rsquo;s check the script again. Hung. Well it looks like an NFS issue. Wireshark Time!\nAfter a bunch of test reads and write to the NFS mount with Wireshark slurping up packets, it looks like the client sends out read requests and the server never responds. The TCP connection retransmits the un-ACK\u0026rsquo;d packets until the TCP session times out, sends a RST, and sends the read request again.\nAfter inspecting the traffic in the AWS flow logs and in the cloud-to-on-prem firewall, it seems that all the traffic is correctly making it from the cloud client to the on-prem NFS server. So, what do we do now?\nAfter a bunch of additional tests, I ran a test of incrementally increasing the size of a file being written one byte at a time. The writes started to fail around 1300 bytes. Looking at the traffic in Wireshark, these write requests approached 1500 bytes. While both the server and client were using jumbo frames (9000 MTU), it is possible there is a 1500 MTU link somewhere between these two hosts.\nDiscovering the Path to a Fix Collaborating with our cloud operations team, we confirmed that the Direct Connect between the cloud and on-prem did have a 1500 MTU. However, this did not explain why the client/server could not use the standard Path MTU Discovery (PMTUD) to detect the smaller link and reduce the effective MTU to the lowest MTU along the path.\nPMTUD activates when a frame which is too large for a link is sent with the Don\u0026rsquo;t Fragment (DF) flag set. When network gear receives a frame too large for the MTU of the next hop, it will either fragment the packet or if the DF flag is set, return an ICMP error \u0026ldquo;Fragmentation Needed and Don\u0026rsquo;t Fragment was Set\u0026rdquo; packet to the sender and drop the packet. Testing in the other AWS account, this worked correctly and the TCP session downgraded to a 1500 MTU (technically the MSS was reduced to 1500 not the MTU, but that is a whole other topic). However for some reason in the original account, the session did not reduce to 1500. 
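(A hedged aside on the path-MTU findings above.) A quick way to confirm a sub-9000-byte path from a Linux host is to send probes with the Don't Fragment bit set; this is only a sketch, and nfs-server.example.com is a placeholder hostname:

# 8972 bytes of ICMP payload + 28 bytes of IPv4/ICMP headers = a 9000-byte packet
ping -c 3 -M do -s 8972 nfs-server.example.com
# 1472 + 28 = a standard 1500-byte packet, for comparison
ping -c 3 -M do -s 1472 nfs-server.example.com
# or let tracepath walk the path and report the smallest MTU it finds
tracepath -n nfs-server.example.com

If the jumbo-sized probe is silently dropped while the 1500-byte probe succeeds, something along the path is clamping the MTU, which matches the behavior described above.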
Comparing a packet capture from both accounts, I noticed that there was no ICMP error response in the broken account.\nAWSucks After much back-and-forth with our cloud ops team, we found that in the broken account there was an additional layer on top of the Direct Connect. The AWS Transit Gateway not only has a maximum MTU of 8500, but also does NOT return an ICMP \u0026ldquo;fragmentation but DF\u0026rdquo; error. So when the client or server sends a packet larger than the MTU of the Transit Gateway, the TG drops the packet without informing the sender of why the packet is being dropped, and the sender continues to retransmit the packet for which it has not received an ACK, thinking it was just randomly dropped.\nFinding Another Way So PMTUD won\u0026rsquo;t work; great. And we can\u0026rsquo;t reduce the client\u0026rsquo;s MTU to 1500 as there are workloads running on it which must have jumbo frames. Thus began a flurry of research resulting in me learning of Linux\u0026rsquo;s Packetization Layer PMTUD (PLPMTUD). Using the net.ipv4.tcp_mtu_probing kernel tunable, we can enable MTU (really MSS) size discovery for TCP sessions (see the sysctl sketch below).\nHow It Works When the sender sends a packet which is too large for a link in the path of an active TCP connection, the too-large packet will be dropped by the network and the sender will not receive an ACK from the receiver for that packet. The sender will then retransmit the data on an exponential backoff until the maximum retransmit count is reached. The sender will then send an RST and try a new TCP session (which, if tried with the same size packet, will just continue to repeat).\nThe tcp_mtu_probing functionality takes over once the standard TCP retransmit limit is reached. With tcp_mtu_probing enabled, the kernel\u0026rsquo;s network stack splits the offending packet into net.ipv4.tcp_base_mss sized packets and sends those packets instead of the too-large packet. For further packets, the network stack will attempt to double the current packet size until a larger packet again goes unacknowledged. It then uses the largest working packet size for all future packets for the TCP session. Linux 4.1 improves on this functionality by using a binary search instead of repeated doubling of the MSS. The initial reduced packet size starts at tcp_base_mss, and the stack then binary searches for the largest functioning MSS between tcp_base_mss and the MTU of the interface passing the traffic.\nA great article digging deeper into this is Linux and the strange case of the TCP black holes.\nConclusion While the ideal solution would have been for AWS to fix their broken, non-compliant network infrastructure, it is unlikely they will ever do so. Instead, we used a solution built into the Linux kernel which allows the continued use of jumbo frames for cloud-local traffic while preventing traffic over the Transit Gateway from breaking due to large packets.\n","description":"A simple issue at work with cloud hosts not being able to access an NFS mount on-prem turned into a multi-month bug hunt which ended with finding a low MTU network path and an AWS \"feature\" (pronounced bug)","id":0,"section":"posts","tags":["cloud","AWS","networks"],"title":"Unraveling the Mystery of NFS Hangs, or How The (Hybrid) Cloud is a Pain","uri":"https://johnhollowell.com/blog/posts/aws-tg-mtu/"},{"content":" I\u0026rsquo;ll start off by saying I love my Framework laptop. The transition from my old 15\u0026quot; laptop to this 13\u0026quot; Framework has been a lot more seamless than I thought it would be.
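(Returning to the PLPMTUD discussion above.) A minimal sysctl sketch for enabling tcp_mtu_probing on a Linux sender; the values shown are common illustrative settings, not necessarily the ones used in the incident:

# 1 = probe only after a black hole is suspected, 2 = always probe
sysctl -w net.ipv4.tcp_mtu_probing=1
# MSS to fall back to when probing starts (1024 is a common default)
sysctl -w net.ipv4.tcp_base_mss=1024
# persist the settings across reboots
printf 'net.ipv4.tcp_mtu_probing = 1\nnet.ipv4.tcp_base_mss = 1024\n' > /etc/sysctl.d/90-tcp-mtu-probing.conf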
It has worked perfectly for everything I\u0026rsquo;ve put it through.\nMy Experience With My Framework Battery Life Even with the recently-replaced battery in my old laptop, my Framework has a much longer battery life. Thanks to a combination of both the battery and processor, I\u0026rsquo;m able to get many hours out of even a demanding workload. I\u0026rsquo;m able to have Discord open in a video call for hours while having many other browser tabs or games running without the worry of where my charger is.\nLap-ability The one loss from moving from a 15\u0026quot; laptop to a 13\u0026quot; laptop is the lessened ability to use it effectively on my lap while connected to cords. The smaller size of the 13\u0026quot; means that it sits more between my legs rather than fully on top of my legs. This is normally fine, especially since the fan vents to the rear rather than to the right or left so my legs aren\u0026rsquo;t getting blasted with heat, but it does make having cables connected to the ports difficult and strains the cables\u0026rsquo; connectors.\nThankfully, I typically only need to have my charger connected to my laptop, so I found a solution. Since my charger is a type-c charger, I can just pop out one of my modules and directly connect the charger\u0026rsquo;s cable to the deeply-inset type-c port behind where the module would go. This means only the small cable is pressed against my leg and does not put any strain on the cable.\nCharging Fan One thing that has disappointed me about my Framework is the leaf blower it turns into when plugged in to charge (when the battery is discharged). I think a combination of moving from the \u0026ldquo;Better Battery\u0026rdquo; Windows power profile while on battery to \u0026ldquo;Best Performance\u0026rdquo; when plugged in and the extra heat from the high-speed charging capabilities means the fan kicks up to be quite loud when plugging in. I have not played around much with power profiles to try to reduce this, but it typically only lasts for a short time and I almost always prefer the better performance over a bit of ignorable noise.\nPhysical Camera/Microphone Switches I didn\u0026rsquo;t think this would be a big thing, but it is really nice to be able to have confidence that, at the hardware level, my mic and camera are not able to be accessed.\nE Cores As I have a wide, eclectic collection of software I run on a regular basis, I was pleased to not run into many issues with programs not properly understanding/scheduling with the efficiency cores on the 12th gen Intel processor. There are some tools (e.g. zstd) which don\u0026rsquo;t properly detect the cores to use. However, this could be due to running some of these quirky tools in WSL and how some tools try to detect hyper-threading to schedule themselves only on physical cores.\nFOMO? Now that 13th gen Intel and AMD mainboards have come out for the 13\u0026quot; Framework, do I feel like I am missing out or should have waited? Not at all. If I had needed a laptop once the 13th gen had come out, I would definitely have chosen to use the 13th gen mainboard, but I am happy with what I have. Especially since I rarely have a use case for a high-performance laptop, I\u0026rsquo;m very comfortable with my 12th gen.\nPart of the appeal of the Framework is that I don\u0026rsquo;t have to have as much of a fear of missing out. The new laptops all have the same hardware outside of the mainboard.
If I want a 13th gen laptop, I can easily upgrade my existing laptop to the 13th gen and get a 12th gen computer to use as a server, media PC, etc. And if I keep my laptop for long enough that the hardware is wearing out, I can replace the parts that are broken (or of which I want an improved version) and keep all the remaining parts, reducing the cost of repair and keeping still-good parts from ending up e-waste.\nAs for regrets getting the Framework rather than some other newer system, I have none. I have not stayed as up-to-date with the laptop scene since I\u0026rsquo;m not currently in need of a new one, but the systems that I have seen have not presented any better features or performance for my use cases. Some of the new Apple laptops have been interesting to follow, but I\u0026rsquo;m not a big fan of many aspects of Apple\u0026rsquo;s hardware and ecosystem and I still do come across some software that is not compiled for ARM (a big one being Windows). I love ARM and use it quite a bit in my homelab (mostly Raspberry Pis), but for my main system is just not quite universal enough for a daily driver.\nConclusion Overall, I\u0026rsquo;m very happy with my Framework and would absolutely recommend it to others. Yes, it is more expensive than another laptop with comparable specs, but the Framework\u0026rsquo;s build quality is supreme. If your use of laptops is more disposable, the Framework may not be for you (and that is okay), but I value the goals of the Framework and truly expect to get my money\u0026rsquo;s worth out of the repairability and modularity of the Framework.\n","description":"After living with the 13\" Framework laptop and releases of new specs for the 13\" and plans for the 16\", I've got some thoughts on my Framework","id":1,"section":"posts","tags":["hardware","life"],"title":"Framework Followup","uri":"https://johnhollowell.com/blog/posts/framework-followup/"},{"content":" I recently upgraded my laptop to a Framework laptop since my old trusty laptop\u0026rsquo;s screen cracked and a replacement screen cost as much as some new laptops. These are my initial impressions of the laptop\u0026rsquo;s build, performance, and usability.\nUse Case I have a bit of a minimal use case for my laptop. Since I have a powerful desktop and a fairly performant phone, I don\u0026rsquo;t need my laptop to be a do-everything device. If I need to do something that requires a lot of performance (gaming, heavy development builds, video transcode, etc), I will use my desktop. If I need to quickly do something, I will use the phone that is always in my pocket or on the desk next to me. My laptop fulfils three main functions: portable large-screen remote access to desktop, couch web-browsing and light development, and media consumption while on the road.\nDesktop Remote The main place I will install games and software, store some files, and do high-performance tasks is on my desktop. I often will need or want to do something on my desktop while not sitting at my desk. Be it from a few meters away on the couch or thousands of kilometers away, I will often remote into my desktop from my laptop. There are not really any specific requirements, but a large screen, enough CPU performance to decode the remote screen stream, and good enough networking to get the connection through. 
This is honestly the lowest performance need for a laptop, but having hardware decode for whatever remote solution would provide long battery life for this use case.\nCouch Computer This is the middle-of-the-road use case in terms of requirements. It is mostly web browsing, some light video consumption, and low-demand development/writing (like writing this blog). I use VS Code devcontainers for just about everything, so being able to run docker and VS Code well is a must. Mostly, this presents as having enough memory for the containers, VS Code (thanks memory-hungry electron), and all the extensions I typically use. Occasionally, having some performance is nice to be able to build a new dev container (fast network to pull dependencies, fast CPU to decompress image layers and compile dependencies, and mostly fast disk to support fast installation of packages, create new layers, etc.) and makes getting started contributing to a new project incredibly streamlined.\nOn-the-road System This is the most taxing use case that I have for my laptop. This is everything from Couch Computer and more. Some video transcoding (compressing) of footage I\u0026rsquo;ve taken, some light (and not-so-light) gaming, and occasionally some heavy network traffic (using my laptop as a portable NAS or sneaker-net).\nThis is also the use case where the connectivity of the laptop is the most important. From hooking into projectors using HDMI, to needing ethernet for some network troubleshooting, to flashing a Raspberry Pi or reading images from an SD card, the most variability in how I interact with my computers is on the road. The ample expansion/connectivity modules make it easier to have the right connector where I want it, when I want it. Also, the ability to move my ports around mean I will never have to do the awkward my-HDMI-is-on-the-wrong-side-for-this-podium dance again. Further, having 4 thunderbolt USB-C ports means that even if there is not an official module for what you want, you can easily connect a dongle or even make your own modules. Always in the data center? make yourself an RS-232 serial port module for interacting with all the serial consoles on your hardware.\nDesktop Replacement As a bonus use case, I will very, very rarely use my laptop at my desk instead of my desktop. My work laptop usually sits on my desk, plugged into a thunderbolt dock connected to all my peripherals and monitors. Every once in a while, I might use this setup with my personal laptop in this setup if I was working on some project on my laptop that would be too cumbersome to move to my desktop but might benefit from the extra monitors and peripherals.\nBuild Form Factor The Framework is a 13.5\u0026quot; laptop with a 3:2 screen ratio. While I\u0026rsquo;m used to my previous laptop\u0026rsquo;s 15\u0026quot; form factor, the added height of the Framework\u0026rsquo;s screen and higher resolution maintains a good amount of screen real estate. It also provides a more compact body which is more portable and takes up less space on a desk. Weighing in at 4.4 lb, it isn\u0026rsquo;t a light laptop, but the incredibly sturdy chassis and zero deck flex on the keyboard are reason enough for the bit of weigh.\nPower and Battery It uses Type-C (USB-PD) for charging via any of the 4 expansion ports when a USB-C expansion module is installed (or really you can directly connect to the type-c ports at the back of the expansion ports). This allows charging from either side of the laptop which brings a great versatility. 
While writing this, the idle power draw was ~15W at a medium-low screen brightness. Running a benchmark, the draw from the USB-C charger reached ~62W (on a 90W charger).Charging from 0% to ~80% while powered off averaged around 40W. Charging from ~85% to 100% averaged around a 30W draw (~10W to the battery and ~15W to the idle running system).\nKeyboard The keyboard is easy to type on with ample key spacing and a sensible key layout. I wrote this whole post on the Framework\u0026rsquo;s keyboard. The keys have good stabilization and have a comfortable travel distance. The palm rest areas beside the trackpad are large enough to use and the keyboard is centered on the chassis so one hand/wrist is more extended than the other.Overall, an easy keyboard on which to type.\nTrackpad Not much to say about the trackpad, and that is a good thing. The trackpad is a nice size: not too small to be useless and not too large to be cumbersome to use. It has a nice tactile click when pressed (which I rarely notice since I mostly tap-to-click rather than use the actual displacement button method of clicking) and a smooth surface which is easy to swipe across. The trackpad\u0026rsquo;s palm rejection while typing is very good, but the button still functions while the movement is disabled. If you place a lot of weight on the insides of your hands while typing, you may need to be careful to not push too hard on the trackpad while typing. The typical multi-touch gestures work correctly and smoothly zoom, swipe, and the rest.\nSpeakers The speakers on the Framework have impressed me so far. I will use earphones/headphones over speakers most of the time, but the speakers are much better than my previous laptop\u0026rsquo;s speakers and are a nice, usable option. They are quite loud and even at 100% there is no distortion, clipping, or chassis rattle. Although the speakers are down-firing at the front (user-facing side), they are on the angled bevel of the side so even sitting atop a flat surface the speakers fire out and around the chassis to provide a well-balanced sound profile.\nPerformance CPU My Framework performs well. I got the i5 12th gen variant (i5-1240P, up to 4.4 GHz, 4+8 cores) as a low power yet still performant portable system. Following on the Desktop Remote section above, I very rarely need my laptop to be very performant. What I want most of the time is something that can boost to do a little bit of compute while mostly being a power-efficient system that can run web apps, remote desktop software, and YouTube. The system excels at these tasks. I\u0026rsquo;ll leave the hard numbers and comparisons to benchmark publications, but the system has done everything (within reason) I\u0026rsquo;ve thrown at it.\nMemory While it may seem basic, the ability to have socketed memory can\u0026rsquo;t be ignored in modern laptops. Being able to upgrade and/or expand your system\u0026rsquo;s memory down the line is one of the simplest ways to give an old machine a boost. However, a lot of new machines are coming out with soldered memory that can\u0026rsquo;t be upgraded, expanded, or replaced. The availability of 2 SODIMM slots for memory is a great feature for repairability and the longevity of the system.\nCooling and Fan One disappointing aspect of the Framework is its cooling system and fan. When idle, the fan is inaudible and the user-facing components stay cool. However, even when idle the bottom chassis panel gets slightly too warm to hold for a long time. 
While on a desk, this is not an issue but when on a lap (where the lap in laptop comes from), the heat it a bit too much for bare skin contact and going hand-held with one hand on the bottom for support is not comfortable to hold. However, even when running full-tilt under a stress test, the top (keyboard, trackpad, and palm rest areas) stayed cool and comfortable.\nThe cooling fan, when going at full speed, is loud but does an adequate job of keeping the internals cool and preventing drastic thermal throttling. A concern I had heard from others was with the vent being in the hinge and concerns over the cooling capacity of the system while the screen is closed. After some tests, the hinge cover is shaped to direct the exhaust air out the bottom of the hinge which gives enough airflow to keep the system cool.\nWiFi 6E While I currently don\u0026rsquo;t have any other wifi gear which supports 6E to test against, I believe 6 GHz is going to be super useful in the coming years and having a computer that already supports it is a great feature. And even if it didn\u0026rsquo;t have a 6E chip in it, the Framework\u0026rsquo;s wifi is socketed which allows for future improvement.\nFor what I can test, the Framework\u0026rsquo;s WiFi works well. It gets the maximum speed my Access Point (AP) supports and has very good range. I haven\u0026rsquo;t noticed any difference it reception between different orientations of the laptop, so the antenna placement seems to be the best it can be.\nUsability I/O The ability to select the I/O that your laptop has is one of the obvious usability features of the Framework. The ability to have up to 4 USB-C thunderbolt ports is impressive and the various modules to adapt those ports into other common ports is fantastic. My favorite ability so far is just having a USB-C port on both sides of the laptop. When I was searching for a new laptop, few had a Type-C port and even fewer had at least one on both sides. The Framework works well with all the USB-C and thunderbolt docks and dongles that I have used with it.\nBattery Another great usability feature is the long battery life. The combination of an efficient processor and a high-capacity battery makes the Framework able to stay running for hours.\nSecurity, Privacy, and Webcam For security and privacy, the Framework has several great features. For signing in (on supported OSes), you can use the fingerprint sensor integrated into the power button for authentication. While my previous laptop had a Windows Hello capable camera, the fingerprint reader is just about as easy to use. The fingerprint reader works well\nOn the webcam, the Framework has physical toggles to disable the webcam and disable the microphone (independently). They toggles have a nice red section visible when disabled and the camera has a light when it is active. It is really nice to have physical switches for the cameras, and since I am using the fingerprint sensor for login (instead of the facial recognition of my previous laptop), I can leave the camera disabled most of the time. The camera is 1080p and does a good enough job with challenging situations like low light and high contrast environments.\nScreen The screen is a 2256 x 1504 (3:2) glossy screen. The extra screen real estate is nice for tasks that can make use of the extra vertical space, media consumption which is mostly 16:9 or wider leaves unused space on the screen. The maximum brightness of the screen is quite bright and is easily visible in direct sunlight. 
The screen also has a light detector which can be used for automatic screen brightness adjustments. However, at least in Windows, the auto brightness works well but causes a massive jump in brightness when adjusting to above ~50%. Due the the glossy, highly-reflective screen, bright sun from behind makes it hard to read the screen even at maximum brightness. I\u0026rsquo;m planning to investigate what matte screen films/protectors are available that I could use to make the screen less reflective. As I will very rarely use my laptop for very color accurate uses, a matte screen would be better.\nWindows Install and Drivers One cautionary note revolves around the newer, less used components in the Framework. I installed Windows 10 and out of the box, the trackpad and WiFi did not work. I had to use an Ethernet dongle (since I did not get the ethernet Framework module) to download the driver pack from Framework\u0026rsquo;s website. It did not automatically get the drivers from Windows Update like most other firmware/drivers. I also tried Ubuntu 22.04, and while it had fully functional WiFi and and trackpad out of the box, it did not properly adjust the screen backlight based on the function keys (but was able to control the brightness manually using the OS settings slider).\nOverall Impressions Overall, I really like my Framework laptop so far. I did not think I would like the smaller size, but setting the display scaling to lower than the default of 200% (I\u0026rsquo;m testing between 175% and 150%) give more than enough screen space for task I need to do on my laptop. After writing this whole post on the keyboard both on a couch and a desk, it is comfortable to type on and quick to pick up touch typing. It is small and portable while having good performance, battery longevity, and screen real estate. I wish it was a bit bigger as I like a laptop with a larger screen, but for the chassis size the screen is nearly 100% of the size of the laptop footprint. With a 11-in-1 USB dongle, it has as much or more connectivity than my desktop. It works flawlessly with thunderbolt docks (at least the ones I have tested). The first install of Windows 10 was a little painful having to install the driver bundle, but that is a small, one-time price to pay for a nice machine on an old OS.\n9.5/10. Would recommend.\n","description":"I recently upgraded my laptop to a Framework laptop since my old trusty laptop's screen cracked and a replacement screen cost as much as new some laptops. These are my initial impressions of the laptop's build, performance, and usability.","id":2,"section":"posts","tags":["hardware","life"],"title":"Framework First Impressions","uri":"https://johnhollowell.com/blog/posts/framework-first-impressions/"},{"content":" Trying to boot off an NVMe drive on older hardware can cause some issues. If you are running an older BIOS/UEFI, it may not have the needed drivers to understand how to talk to a NVMe drive. I ran into this exact issue when trying to boot my Dell R510 from an NVMe drive.\nTo boot from NVMe, I would need to use some shim which could be booted by the BIOS which would chain-boot the actual OS on the NVMe.\nAttempt 1 - Clover The first method I attempted to used was the Clover Bootloader. Clover, while primarily used for Hackintoshes, can have NVMe support added and chain boot to another disk. 
I wanted to try this first as I would prefer an OS-indifferent solution that would continue to work no matter what I installed on the NVMe.\nI attempted to image Clover onto a USB drive and after several wrong attempts, I finally formatted the USB as fat32 and just copy/pasted the contents to the drive. I then followed instructions I found to enable NVMe compatibility by copying NvmExpressDxe.efi from EFI/CLOVER/drivers/off into EFI/CLOVER/drivers/BIOS/ and EFI/CLOVER/drivers/UEFI/. I then modified the EFI/CLOVER/config.plist file to automatically boot the the NVMe drive after a 5 second pause.\nHowever, I could never get Clover to read this config.plist file. I tried placing it in other paths that were suggested by comments on the internet. I tried reverting to the original file and modifying one small value to ensure I had not messed up the file formatting. Still, I could not get Clover to read the config file and automatically boot from the NVMe drive. It would just remain at the boot selection menu where I could manually select the NVMe to boot from which would then work perfectly.\nAttempt 2 - Proxmox Boot Proxmox comes with the proxmox-boot-tool tool which is used to synchronize all the boot disks with the UEFI (ESP) partition. After giving up on Clover, I looked into proxmox-boot-tool and found I could just place an extra ESP partition on the USB drive and let proxmox-boot-tool keep it up-to-date and synced.\nRather than creating the correct partitions in the correct locations and of the right size, I just did a dd if=/dev/\u0026lt;root pool\u0026gt; of=/dev/\u0026lt;usb drive\u0026gt; bs=1M count=1024 to copy over the first 1 GB of the disk. I then used gparted to delete the main partition (leaving the BIO and ESP partitions) and to give the remaining partitions new UUIDs. I then booted into Proxmox and proxmox-boot-tool format /dev/disk/by-uuid/\u0026lt;USB ESP partition UUID\u0026gt; --force and proxmox-boot-tool init /dev/disk/by-uuid/\u0026lt;USB ESP partition UUID\u0026gt;. Once that finished, I rebooted and the USB drive was used as the boot drive which booted into the main Proxmox OS.\nConclusion I\u0026rsquo;ve had this in place for a few months now and it has worked perfectly through several updates to the boot cmdline options and kernel updates.\n","description":"My process of finding the best way to boot Proxmox off an NVMe drive in an old Dell R510","id":3,"section":"posts","tags":["sysadmin","proxmox"],"title":"NVMe Boot in Proxmox on Older BIOS","uri":"https://johnhollowell.com/blog/posts/nvme-proxmox-bios/"},{"content":" This was my first year going to the All Things Open and my first in-person conference in several years.\nOverall, I really enjoyed the conference and would recommend other\u0026rsquo;s attend. It definitely helped that I already live in Raleigh so I didn\u0026rsquo;t have to travel to the conference, but even traveling to the conference would be a good experience.\nVenue The Raleigh conference center is a spacious venue. The paths to the session rooms are wide and easy to access. Most of the session rooms were large enough to fit everyone in the session. The conference center has ample surrounding parking and food options if the catered sandwiches don\u0026rsquo;t cover your appetite. The sponsor/vendor booths were set up in the atrium with plenty of room to interact with the vendors and still have room to walk past. 
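(Circling back to the Proxmox NVMe-boot workaround above.) A consolidated, hedged sketch of registering the extra USB ESP with proxmox-boot-tool; UUID is a placeholder for the partition UUID reported by blkid:

# format and register the USB ESP so Proxmox keeps it in sync
proxmox-boot-tool format /dev/disk/by-uuid/UUID --force
proxmox-boot-tool init /dev/disk/by-uuid/UUID
# verify it is now tracked; it will be refreshed on kernel and bootloader updates
proxmox-boot-tool status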
All the areas were clean and tidy and the HVAC worked well in all but the smallest session room when it was packed.\nVendor Booths There were a lot of vendors spread around the whole atrium area. The conference added an interesting optional gamification element: the keynote sessions and each vendor booth had a code which, when entered into the conference app, would add points to your score. At the end of each day, the top scorers were randomly drawn for some very nice prizes.\nThere were a lot of really nice vendors present, from large companies like AWS, Microsoft, and Meta to small FOSS organizations like the FSF and OSI. Many vendors had great swag and welcoming representatives to talk to. While most of the companies were definitely focused on selling to enterprise customers, there were many that had personal/community versions of the software available and knowledgeable people to answer technical questions.\nSessions The session subjects covered a wide range, from enterprise-focused topics to tracks centered on the open source community and collaboration. Some of the sessions were livestreamed for the virtual attendees (and thus recorded) while some were not recorded. I mostly attended the non-recorded sessions as I can watch the recorded sessions later, but all the sessions were well attended.\n","description":"My experience attending All Things Open for the first time","id":4,"section":"posts","tags":["life","ATO"],"title":"All Things Open 2022 Impressions","uri":"https://johnhollowell.com/blog/posts/ato22/"},{"content":" This December I graduated from Clemson University with my Masters degree in Computer Science. There were a lot of things I learned (obviously), but I think the most useful things I learned were not from the course material, but my outside learning and interactions with peers and professors.\nProject Organization All my courses this semester were project based; while some of the grade for the course came from quizzes or homeworks, over 50% came from a semester-long project. My experience with these projects greatly differed based on the professor\u0026rsquo;s involvement and whether it was a group project.\nProblem Definition One of my main gripes for several of my project assignments was the complete lack of the professor defining what the project should look like. While there was some guidance on the general category of project that was required, there was little to no guidance on what specific topics were in scope. We submitted a project proposal, which would have helped with validating the acceptability of the project; however, the professors rarely commented on the validity of the proposal, let alone returned a grade for the proposal in a reasonable amount of time (read: before the end of the semester).\nThis is a perfect example of why requirements gathering and client interaction are such an important part of the development lifecycle. Knowing the plan for the project before spending development time ensures it is not wasted on something that is not the desired result. Having strict requirements allows the developer to precisely match the functionality to the desired outcomes.\nDeliverables Another important aspect which was mostly glossed over was deliverables. While each professor did say a deliverable of the project would be a final paper, specifics on the format, length, and content of the paper were lacking or never given. In addition, other deliverables were requested after the initial assignment was created, often at the very end of the semester.
While this is not that uncommon in \u0026ldquo;real life,\u0026rdquo; often added requirements/deliverables will push back the projects due date; not so with school projects which must be done by the end of the semester.\nGroup Work Group work in school is almost always a complete mess. Over the course of my Masters degree, I\u0026rsquo;ve been in some okay groups and a lot of bad groups. I\u0026rsquo;ve been in groups where someone went completely AWOL for several months and only responded to messages when it was time for them to add their name to the deliverables. I\u0026rsquo;ve also been in some groups that were fantastic where the team members understood that occasionally someone might have other stuff they needed to prioritize but everyone would at the end of the semester all contributed equally. The best groups recognized the different skills of each member and assigned tasks to the person that was most capable of completing it.\nGroup work in school is very different from working in teams in industry. In school your group grade is at best 10% based on your individual contribution. This leads some people to not contribute to the team and just accept a 90% at their max grade. In work, if you do not do the tasks assigned to you, no one is going to do your tasks and it is very apparent who\u0026rsquo;s responsibility they are. Getting paid do do the work rather than paying to do the work also drastically changes the motivation and desire to complete the work.\nSelf Learning Most of the course I took in my Masters program covered information I had learned previously either on my own or on the job. This meant that a large portion of the course material was redundant to me. However, these courses gave me the opportunity to deepen my knowledge of the covered material and utilize the professors as a resource to discover new corollary topics to learn on my own. This gave me the opportunity to learn at my own pace and follow the rabbit trails that I find interesting.\nI have also had courses that I had to teach myself; professors that don\u0026rsquo;t teach or teach wrong material. One professor in particular I had to stop going to class as listening to her lectures decreased/confused my pre-existing knowledge on the topic.\nLab Teaching Assistantship I had a lot of fun being a Teaching Assistant (TA) for a undergrad lab section this past semester. I got to befriend some really cool students and get a taste of what it takes to teach. As I would like to teach at some point in the future, this was a fantastic opportunity to understand some of the requirements of teaching, experience the \u0026ldquo;joy\u0026rdquo; of grading, and dealing with students\u0026rsquo; questions and concerns.\n","description":"This December I graduated from Clemson University with my Masters degree in Computer Science. There were a lot of things I learned (obviously), but I think the most useful things I learned were not from the course material, but my outside learning and interactions with peers and professors.","id":5,"section":"posts","tags":["life","opinion"],"title":"Masters Degree Takeaways","uri":"https://johnhollowell.com/blog/posts/masters-degree-takeaways/"},{"content":" ZFS is a great filesystem that I use on most of my systems and it makes full-drive backups a breeze when I am refreshing hardware in my homelab. However, sometimes I want to backup to a non-ZFS system. 
These are the steps I use for fast and verified backups to a file on another computer.\nTL;DR: Combine the power of ZFS, zStandard, pv, and netcat to have a fast backup of a ZFS snapshot with verbose metrics of the process and progress.\nBackground If you already know about ZFS, snapshots, replication, and zStandard, feel free to skip this section. ZFS is a next-generation filesystem which supports a lot of great usability, data integrity, and performance features.\nOne of the most useful features are snapshots. Since ZFS is a copy-on-write (COW) filesystem, it can make a \u0026ldquo;copy\u0026rdquo; of an entire filesystem instantly as it just stores the current state and keeps blocks of data even if they later get updated/deleted. This is incredibly useful for backing up a system, as you can make a snapshot of the system instantly while it is running and then take the time to transfer the data.\nZFS can take a snapshot and zfs send the data in a stream that can be piped to a file, other commands, or a zfs receive on another host to load the datasets to that host\u0026rsquo;s storage and make the files available on the live filesystem. Receiving to another system has many benefits, but one major problem is the destination requires a ZFS pool mounted that has enough unused storage to receive all the incoming data. Sometimes this is not feasible, or even if the destination has a working pool it is not desired to mix in another filesystem with the existing data. In this case, sending to a file will store the entire send stream that can later be cat\u0026rsquo;d back to a zfs receive whenever desired.\nOne other tool used in this guide is zStandard. This is a newer compression library with great compression ratios while maintaining fairly high compression speed and incredibly fast decompression speed. I love zStandard and try to use it in everything. It has also had a large adoption increase in the last year or so with many other projects including zStandard compression support (ZFS, btrfs, tor, and Rsync to name a few).\nSetup There are two hosts: one using ZFS which will be backed up (src.example.com), and one host which will store the backup (dest.example.com). This destination host only needs enough storage space to store the (compressed) send stream.\nAll code is run on src.example.com unless otherwise noted. Making a Snapshot ZFS send streams only work on snapshots, so we need to create a snapshot of the current files and data to be able to send it. If you already have a up-to-date snapshot (maybe from automation), you can just uses that snapshot.\nTo create a snapshot, you either need to be root (run the following command with sudo), or have the snapshot ZFS permissions on the dataset. As we will be creating a recursive snapshot of all datasets, it is easier to just run commands as root.\nThe format of the snapshot command is\nzfs snap[shot] pool/datasetA/subdataset/thing1@snapshot-name.\nTo snapshot the \u0026ldquo;testing\u0026rdquo; dataset on my \u0026ldquo;tank\u0026rdquo; pool with the snapshot name \u0026ldquo;backup_2021-01-02_0304\u0026rdquo;, I would use either command\n1 2 zfs snap tank/testing@backup_2021-01-02_0304 zfs snapshot tank/testing@backup_2021-01-02_0304 To backup an entire pool, use zfs snap -r tank@full_backup which will recursively (-r) snapshot the given dataset and all datasets below it.\nDetermining the Size of the Send Now that we have our snapshot, it would be nice to know how much data we will be sending and storing for our backup. 
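(Still on the ZFS backup workflow above.) Before sizing the send, it can be worth confirming the snapshot exists; a small sketch using the example names from the post:

# list all snapshots under the pool recursively
zfs list -t snapshot -r tank
# or check just the snapshot created above
zfs list -t snapshot tank@full_backup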
We can either get a (fairly accurate) estimate of the size of the send (quick) or get the exact size of the send. Unless you really need to know the exact size of the send, I recommend the fast method\nFast Size We can get an estimate of the size of a send by running the send with the dry-run flag (-n) in verbose mode (-v).\n1 zfs send -R -n -v tank@full_backup The last line should tell you the estimate of the size of the send.\nSlow Size If you really need the exact size of the send, you can use wc to get the total bytes being sent.\n1 zfs send -R tank@full_backup | wc -c If you want to see the speed that zfs can read the send data off your storage, you can use pv (you might need to install it) to see the size and speed.\n1 zfs send -R tank@full-backup | pv \u0026gt; /dev/null #fullsend Now that everything is prepared, we can actually send the data to the destination. We\u0026rsquo;ll start with the most basic form and add on some extra commands to add speed and metrics of the status of the send.\nIn the following examples, the zfs send command is used with the -R flag. This makes an \u0026ldquo;replication\u0026rdquo; send stream which can fully recreate the given snapshot from nothing. You can omit it if that is not the functionality you need.\n-R, \u0026ndash;replicate\nGenerate a replication stream package, which will replicate the specified file system, and all descendent file systems, up to the named snapshot. When received, all properties, snapshots, descendent file systems, and clones are preserved. 1\nBasic Send Getting bits from A to B is pretty easy. We can use SSH to send the data to the destination host and save it as a file2.\n1 zfs send -R tank@full-backup | ssh dest.example.com \u0026#34;cat \u0026gt; /path/to/saved/file.zfsnap\u0026#34; We can use the size we found earlier to get a rough progress bar. pv can take in the size of the stream and use it to determine an ETA and progress. It can take integer values with units of \u0026ldquo;k\u0026rdquo;, \u0026ldquo;m\u0026rdquo;, \u0026ldquo;g\u0026rdquo;, and \u0026ldquo;t\u0026rdquo;3.\nAssuming we have 24860300556 bytes (23.2GiB), we could use either of the following\n1 2 zfs send -R tank@full-backup | pv -s 24860300556 | ssh dest.example.com \u0026#34;cat \u0026gt; /path/to/saved/file.zfsnap\u0026#34; zfs send -R tank@full-backup | pv -s 24G | ssh dest.example.com \u0026#34;cat \u0026gt; /path/to/saved/file.zfsnap\u0026#34; If you have ZFS installed on the destination, you can check validate the send stream using zstreamdump4.\n1 2 # on dest.example.com cat /path/to/saved/file.zfsnap | zstreamdump While this works and is super reliable, it is inefficient in its data storage size and transport cost. The send stream is uncompressed on your destination and SSH can use significant CPU on low-power devices.\nThe next two solutions seek to solve these problems.\nCompression As long as you are not sending a raw or encrypted snapshot, there will be some amount of compressible data in the send stream. 
We can compress the send stream so it is (a bit) smaller on the destination\u0026rsquo;s storage.\nYou can compress on either the source or the destination, however compressing on the source means less data is transmitted over the network which usually is slower than the CPU needed for compression.\nWe\u0026rsquo;ll use zStandard due to its speed, compression ratio, and adaptable compression level.\nBasic Usage\n1 zfs send -R tank@full-backup | zstd -c | ssh dest.example.com \u0026#34;cat \u0026gt; /path/to/saved/file.zfsnap\u0026#34; ZStandard can also use an adaptive compression level. This means that if the network is slow and the compressor would otherwise be idle, it can increase the compression level and can also reduce the level if the network speeds up. This does mean that it can be a low compression ratio, but if reduced storage space is desired, the stream can be recompressed (e.g. zstd -d /path/to/saved/file.zfsnap.zst | zstd -T0 -19 /path/to/saved/file_smaller.zfsnap.zst). The minimum and maximum levels for the adaption can be set, but using just --adapt defaults to sane defaults (3 to 15).\nIt can also use multiple threads to fully utilize all the cores in the host. The number of threads can be specified or set to 0 to use the same number of threads as cores (-T0)5. It has a verbose mode (-v) as well which gives insight to the compression level and compression ratio of the stream.\n1 zfs send -R tank@full-backup | zstd -c -v -T0 --adapt=min=1,max=19 | ssh dest.example.com \u0026#34;cat \u0026gt; /path/to/saved/file.zfsnap.zst\u0026#34; pv can also be used to give progress and speed calculations (however, it seems that the verbose output of zstd conflicts with pv):\n1 zfs send -R tank@full-backup | pv -cN raw -s 24G | zstd -c -T0 --adapt=min=1,max=19 | pv -cN compressed | ssh dest.example.com \u0026#34;cat \u0026gt; /path/to/saved/file.zfsnap.zst\u0026#34; Local Send Only use the following across a network you trust (not the internet). This method sends data unencrypted. SSH takes a lot of processing power to encrypt data when sending large amounts of data through it. If we are on a secure network where we can sacrifice encryption for speed, we can use netcat instead of ssh.\nHowever, there is not server on the destination (unlike the SSH daemon), so we need to start a netcat server on the destination to listen (-l) for connections on a port (12345) and have it redirecting to the destination file (with pv showing us stats on the receiving side).\n1 2 # on dest.example.com nc -l 12345 | pv \u0026gt; /path/to/saved/file.zfsnap Now we can send it data to save to the file\n1 zfs send -R tank@full-backup | pv -s 24G | nc dest.example.com 12345 Putting it all together 1 2 # on dest.example.com nc -l 12345 | pv \u0026gt; /path/to/saved/file.zfsnap.zst 1 2 3 4 5 6 # on src.example.com snapName=\u0026#39;tank@full-backup\u0026#39; zfs snap -r ${snapName} sendSize=$(zfs send -v --dryrun -R ${snapName} | grep \u0026#34;total estimated\u0026#34; | sed -r \u0026#39;s@total estimated size is ([0-9\\.]+)(.).*@\\1\\n\\2@\u0026#39; | xargs printf \u0026#34;%.0f%s\u0026#34;) zfs send -R ${snapName} | pv -cN raw -s ${sendSize} | zstd -c -T0 --adapt=min=1,max=19 | pv -cN compressed | nc dest.example.com 12345 https://openzfs.github.io/openzfs-docs/man/8/zfs-send.8.html\u0026#160;\u0026#x21a9;\u0026#xfe0e;\nAs far as I know, the .zfsnap is not an official or commonly used extension. However, it helps me know what the file is, so I\u0026rsquo;ve used it here. 
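(Circling back to the ZFS backup pipeline above.) For completeness, restoring the compressed stream is the same pipeline in reverse; a minimal sketch in which newtank is a placeholder destination pool:

# decompress to stdout and hand the stream to zfs receive
# -F rolls the target back to match the stream, so use it with care
zstd -dc /path/to/saved/file.zfsnap.zst | zfs receive -F newtank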
Use whatever file name and extension you want.\u0026#160;\u0026#x21a9;\u0026#xfe0e;\nhttps://linux.die.net/man/1/pv\u0026#160;\u0026#x21a9;\u0026#xfe0e;\nhttps://linux.die.net/man/8/zstreamdump\u0026#160;\u0026#x21a9;\u0026#xfe0e;\nThe documentation for zStandard notes that using the -T flag with --adapt can cause the level to get stuck low. If you have problems with the compression level getting stuck at a low value, try removing the threads flag.\u0026#160;\u0026#x21a9;\u0026#xfe0e;\n","description":"ZFS is a great filesystem which I use on most of my systems and it makes full-drive backups a breeze. However, sometimes I want to backup to a non-ZFS system. These are the steps I use for fast and verified backups to a file on another computer.","id":6,"section":"posts","tags":["ZFS","backup","sysadmin"],"title":"ZFS Backups to Files","uri":"https://johnhollowell.com/blog/posts/zfs-backups-to-files/"},{"content":"I first noticed Kernel Same-page Merging (KSM) while working with Virtual Machines (VMs) under KVM (in Proxmox VE).\nKSM is a way of reducing physical memory usage by using one physical page of memory for all duplicate copied of that page. It does this by periodically scanning through memory, finding duplicate pages, and de-duplicating them via virtual memory. It is an extension of how the kernel shares pages between fork()\u0026lsquo;ed processes and uses many of the same methods of sharing memory. KSM is most often used with virtualization to de-duplicate memory used by guest Operating Systems (OSs), but can be used for any page of memory which the program registers with KSM to scan. \u0026ldquo;Red Hat found that thanks to KSM, KVM can run as many as 52 Windows XP VMs with 1 GB of RAM each on a server with just 16 GB of RAM.\u0026rdquo;1\nVirtual Memory Background To fully understand how KSM works, a (at least) basic understanding of how virtual memory work is required.\nTo prevent programs from having to know where every other process on the computer is using memory, the kernel (the all-powerful dictator of the OS) tells each process it has memory starting at address 0. It then keeps a record of where in actual (physical) memory each block (page) or the virtual memory is located.\nIt uses this mapping to translate memory addresses each time the process reads or writes to memory.\n© Computer History Museum This virtual memory also allows things like memory-mapped files on disk and Copy-On-Write (COW) pages. When a process clones (forks) itself, it doesn\u0026rsquo;t have to make a copy of all the memory it was using. It simply marks each page as COW. Each process can read from their memory with both virtual addresses pointing to the same physical page (now marked COW), but when either attempts to write to memory, the existing physical page is left inn place (so the other process can still use it) and a new physical page is allocated and mapped to the writer\u0026rsquo;s virtual memory. 
This allows pages of memory that are not changed in forked processes to use no additional memory.\nthe same process is used by KSM: it finds duplicate pages in the memory ranges registered with it, marks one of the physical pages as COW, and frees the other physical pages after mapping all the virtual pages to the one physical page.\nhttps://kernelnewbies.org/Linux_2_6_32#Kernel_Samepage_Merging_.28memory_deduplication.29\u0026#160;\u0026#x21a9;\u0026#xfe0e;\n","description":"Today I Learned about Kernel Same-page Merging (KSM)","id":7,"section":"posts","tags":["Linux","memory"],"title":"TIL: Kernel Same-page Merging (KSM)","uri":"https://johnhollowell.com/blog/posts/til-ksm/"},{"content":" Please read A Trickle Is Better Than Nothing before reading this post. I just got over having no internet at my apartment for over a week. I was gone a portion of the week, but it was still very inconvenient. Working remotely doesn\u0026rsquo;t help as to get paid I need to have an internet connection (but not necessarily a fast connection).\nWorking Around It While I could have use cellular data to carry me through, I had already used a significant portion of my data cap on various travels this summer. I ended up just going onto campus and working from my laptop in a computer lab.\nWhile on campus (with its wonderful gigabit symmetrical internet), I downloaded some videos from my YouTube Watch Later playlist so I could have some videos to watch at home. I tried to do as much pre-downloading of content I could so I would have it accessible at home.\nMissing the Trickle So I had everything downloaded and I was fine, right? Wrong.\nI do more with my life than just watching YouTube. I play games, I browse social media, and (most frustratingly in this situation) I code. It is impossible to stay up-to-date on PRs and Issues without being able to connect to the internet. While I could have looked at the GutHub website on my phone, I have a lot of nice tooling around Issues/PRs that is on my desktop.\nI also wanted to open some PRs on some FOSS projects I want to improve. I couldn\u0026rsquo;t do a git clone, I couldn\u0026rsquo;t download the devcontainers needed for the new project and language, I couldn\u0026rsquo;t easily research how to do what I wanted in the documentation on StackOverflow. This stopped me dead in my tracks and forced me to either make a trip back to campus to get internet or use the limited cellular data I had left to clone the entire repo and pull all the require container layers.\nWhat If How could it have been if I had at least a small amount of internet? I would still utilize the high-speed connection at campus to download some content to watch, but I would have still been able to pull up the YT page for the video to see comments and the description and to comment and like myself. While it would have taken a while, I could have left the repo and containers to download while I was watching something or making dinner or overnight. I could have refreshed my Issues/PRs and get any updates on their status and checks. I could have seen that a new video was released by my favorite channel and either queue the video to download or go somewhere with internet to quickly download it.\nOverall, I am very grateful for the internet I have. 
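(Returning to the KSM explanation above.) On most distributions KSM is driven through sysfs; a hedged sketch of starting the scanner and watching how much it merges, run as root, with illustrative tuning values:

# start the ksmd scanner thread
echo 1 > /sys/kernel/mm/ksm/run
# optionally scan more pages per pass and sleep less between passes
echo 1000 > /sys/kernel/mm/ksm/pages_to_scan
echo 20 > /sys/kernel/mm/ksm/sleep_millisecs
# pages_shared = physical pages kept; pages_sharing = virtual pages mapped to them
grep . /sys/kernel/mm/ksm/pages_shared /sys/kernel/mm/ksm/pages_sharing

Only memory a program has registered via madvise(MADV_MERGEABLE) is eligible for scanning, which is why hypervisors like KVM/QEMU are the main beneficiaries.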
This just makes me appreciate the internet all the more with its redundancy and high availability and goes to prove that the last mile is really the most vulnerable segment of any network or connection.\n","description":"I just got over having no internet at my apartment for over a week, and I can confirm that a trickle is better than nothing.","id":8,"section":"posts","tags":["web","life","opinion"],"title":"Nothing Is Definitely Worse Than a Trickle","uri":"https://johnhollowell.com/blog/posts/nothing-is-definitely-worse-than-a-trickle/"},{"content":" Setting up and maintaining a development environment is hard, especially when you need to destructively test features or libraries. Especially for contributing to a new project, you don\u0026rsquo;t know everything that is needed. Sometimes the install/development instructions assume some base tools or packages that are not included in your development environment of choice.\nIn come devcontainers. Rather than having to search through the README for a project you are wanting to contribute to, installing several packages onto your machine, and troubleshooting when it doesn\u0026rsquo;t work, you can simply open the repository as a devcontainer and you are ready to start contributing. Have a project that requires several separate services (databases, middleware/api server, etc.)? Create a devcontainer using docker-compose and your development environment can launch an entire suit of containers exactly how you need them.\nSetup Install Docker To be able to use containers, we need a container manager: Docker.\nTo get Docker installed, simply follow their instructions\nInstall VS Code To get Visual Studio Code (VS Code) installed, simply follow their instructions\nAdd container remote extension Within VS Code, install the Remote - Containers extension\nClick the Extensions sidebar (or use the \u0026ldquo;Ctrl + Shift + X\u0026rdquo; shortcut) Search for ms-vscode-remote.remote-containers Click \u0026ldquo;Install\u0026rdquo; Test It Out Now that you are ready to use a devcontainer, it is time to test it out!\nYou can grab this blog and use it as the devcontainer to play with. Click on the bottom left in VS Code on the green arrows, find the Container remote section, and select \u0026ldquo;Clone Repository in Container Volume\u0026hellip;\u0026rdquo;, enter https://github.com/jhollowe/blog and hit enter.\nAfter a minute or so of downloading and building your development container, VS Code will be fully functional. You can use the included tasks (Terminal \u0026gt; Run Task\u0026hellip; \u0026gt; Serve) to build and serve the blog. The devcontainer includes everything needed to build the blog and run VS Code. 
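If you would rather drive the same flow from a terminal, the devcontainer CLI can build and start the container outside of the VS Code UI. This is just a sketch and assumes you are willing to install the @devcontainers/cli npm package (it is not something the extension requires), with Node.js already present:\nnpm install -g @devcontainers/cli      # assumes Node.js/npm are installed\ngit clone https://github.com/jhollowe/blog\ncd blog\ndevcontainer up --workspace-folder .   # build the image and start the devcontainer\ndevcontainer exec --workspace-folder . bash   # open a shell inside it\nNote that cloning to a local folder like this behaves like the mounting-a-directory mode described below rather than a container volume.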
VS Code will even pull in common configuration for tools like Git and SSH.\nModes There are several \u0026ldquo;modes\u0026rdquo; of how to store your files in which you can use devcontainers, each with its own benefits and drawbacks.\n\u0026ldquo;mode\u0026rdquo; Pros Cons container volume * fast\n* fully self-contained environment * hard to access files from outside container mounting a directory * easy to get files in and out\n* allows stateful local files * slow file I/O\n* add/edits/deletes affect the source directory cloning a directory * as fast as a container volume\n* easy to get files into container\n* edits/deletes do not affect the source directory * hard to get files out of container ","description":"Setting up and maintaining a development environment is hard, especially when you need to destructively test features or libraries.","id":9,"section":"posts","tags":["development","containers"],"title":"Getting Started With Devcontainers","uri":"https://johnhollowell.com/blog/posts/getting-started-with-devcontainers/"},{"content":"For environments with complex Active Directory (AD) environments, AD forests can allow flexibility in management and organization of objects.\nBasically, an AD forest allows multiple domains and trees of domains (subdomains) to access and have a shared configuration while still having separate domains with separate host servers.\nThey allow domains to trust and access each other while still maintain separations and boarders. I\u0026rsquo;ve seen this used to allow corporate and client domains to communicate or to have a development domain tree that trust and can cross-talk with the production domain tree while still being separate (this is less common as dev domains are usually just subdomains within the production tree).\nResources\nhttps://en.wikipedia.org/wiki/Active_Directory#Forests,_trees,_and_domains https://ipwithease.com/what-is-a-forest-in-active-directory/ https://www.varonis.com/blog/active-directory-forest/ ","description":"Today I Learned about Active Directory Forests","id":10,"section":"posts","tags":["Active Directory"],"title":"TIL: AD Forests","uri":"https://johnhollowell.com/blog/posts/til-ad-forests/"},{"content":" Changing a user\u0026rsquo;s username on Linux requires no processes be running under that user. This makes sense, but what if we only have that user accessible through a SSh connection? What if we don\u0026rsquo;t want to allow external access to the root account? What if the root account doesn\u0026rsquo;t have a password?\nBackground I was recently spinning up a bunch of Raspberry Pis running Ubuntu 20.04 and some VPSes also running Ubuntu 20.04. I wanted to change the username on these nodes, but only really had access to the ubuntu (sudo) account. 
While I know I could use a cloud-init file to create a user exactly how I want (more on that in a future post), I didn\u0026rsquo;t want to re-flash the nodes and was not able to add a cloud-init file before boot on the VPSes.\nThe Process Getting The Commands To Run So we can\u0026rsquo;t change the username of a user with running processes, but a SSH session and a bash shell both run under my user whenever I\u0026rsquo;m connected.\nThe main problem is executing a command from a user (and sudo-ing to root) while not having that user have a process running.\nUsing either of the commands below allows a command to be run as the root user which will continue running\n1 2 3 4 5 # interactive shell sudo tmux # non-interactive command sudo -s -- sh -c \u0026#34;nohup \u0026lt;command\u0026gt; \u0026amp;\u0026#34; Now that we can have a command running as root independent of the initiating user, we need to kill everything of the user so we can run usermod commands without difficulty. We kill the processes and wait a couple seconds for them all to terminate. Then we can run whatever commands we need.\n1 ps -o pid= -u \u0026lt;current_username\u0026gt; | xargs kill \u0026amp;\u0026amp; sleep 2 \u0026amp;\u0026amp; \u0026lt;command\u0026gt; What This Command Does ps lists the processes running on the system -o pid= selects only the process ID (pid) and does not create a header for the column (=) -u \u0026lt;username\u0026gt; selects only the processes running under \u0026lt;username\u0026gt; | takes the output of the previous command (ps) and makes it the input of the following command (xargs) xargs takes a line separated list (can change the separator) and turns them into arguments for the following command (-r tells it to do nothing if its input is empty) kill takes a pid (or list of pids) and terminates the process. While kill can send different signals to processes, this uses the default signal (TERM). \u0026amp;\u0026amp; runs the following command if the preceding command exited successfully (exit code 0) sleep 2 wait 2 seconds for the killed processes to terminate Now, we can get to actually changing the username!\nChanging The Username Now that we can run commands as root without our user running processes, we can proceed to change the username and other related tasks.\nThese commands assume you are running as root. If not, you may need to insert some sudo\u0026rsquo;s as necessary\n1 2 3 4 5 6 7 8 9 10 11 # change the user\u0026#39;s username usermod -l \u0026lt;new_username\u0026gt; \u0026lt;current_username\u0026gt; # move the user\u0026#39;s home directory usermod -d /home/\u0026lt;new_username\u0026gt; -m \u0026lt;new_username\u0026gt; # change user\u0026#39;s group name groupmod -n \u0026lt;new_username\u0026gt; \u0026lt;current_username\u0026gt; # replace username in all sudoers files (DANGER!) 
sed -i.bak \u0026#39;s/\u0026lt;current_username\u0026gt;/\u0026lt;new_username\u0026gt;/g\u0026#39; /etc/sudoers for f in /etc/sudoers.d/*; do sed -i.bak \u0026#39;s/\u0026lt;current_username\u0026gt;/\u0026lt;new_username\u0026gt;/g\u0026#39; $f done Putting it all together When we put it all together (with some supporting script), we get change-username.sh as seen below:\n1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 #!/bin/bash currentUser=$1 newUser=$2 if [ $# -lt 2 ]; then printf \u0026#34;Usage:\\n\\t$0 \u0026lt;current_username\u0026gt; \u0026lt;new_username\u0026gt; [new_home_dir_path]\\n\u0026#34; exit 1 fi if [ $(id -u) -ne 0 ];then echo \u0026#34;Root permission needed for modifying users. Can not continue.\u0026#34; exit 2 fi newHome=\u0026#34;/home/$newUser\u0026#34; if [ $# == 3 ];then newHome=$3 fi echo \u0026#34;Changing $currentUser to $newUser\u0026#34; echo echo \u0026#34;Running this script has the possibility to break sudo (sudoers file(s)) and WILL kill all processes owned by $currentUser\u0026#34; echo \u0026#34;$currentUser will be logged out and will need to reconnect as $newUser\u0026#34; read -n1 -s -r -p $\u0026#39;Continue [Y/n]?\\n\u0026#39; key if [ $key != \u0026#39;\u0026#39; -a $key != \u0026#39;y\u0026#39; -a $key != \u0026#39;Y\u0026#39; ]; then echo \u0026#34;Stopping; no files changed\u0026#34; exit 2 fi # put the main script in /tmp so the user\u0026#39;s home directory can be safely moved tmpFile=$(mktemp) cat \u0026gt; $tmpFile \u0026lt;\u0026lt; EOF #!/bin/bash shopt -s extglob # terminate (nicely) any process owned by $currentUser ps -o pid= -u $currentUser | xargs -r kill # wait for all processes to terminate sleep 2 # forcibly kill any processes that have not already terminated ps -o pid= -u $currentUser | xargs -r kill -s KILL # change the user\u0026#39;s username usermod -l \u0026#34;$newUser\u0026#34; \u0026#34;$currentUser\u0026#34; # move the user\u0026#39;s home directory usermod -d \u0026#34;$newHome\u0026#34; -m \u0026#34;$newUser\u0026#34; # change user\u0026#39;s group name groupmod -n \u0026#34;$newUser\u0026#34; \u0026#34;$currentUser\u0026#34; # replace username in all sudoers files sed -i.bak \u0026#39;s/\u0026#39;$currentUser\u0026#39;/\u0026#39;$newUser\u0026#39;/g\u0026#39; /etc/sudoers for f in /etc/sudoers.d/!(*.bak); do echo \u0026#34;editing \u0026#39;\\$f\u0026#39;\u0026#34; sed -i.bak \u0026#39;s/\u0026#39;$currentUser\u0026#39;/\u0026#39;$newUser\u0026#39;/g\u0026#39; \\$f # TODO fix $f not getting the file path for some reason done EOF echo \u0026#34;Putting script into $tmpFile and running\u0026#34; chmod 777 $tmpFile sudo -s -- bash -c \u0026#34;nohup $tmpFile \u0026gt;/dev/null \u0026amp;\u0026#34; ``` \u0026lt;!-- markdownlint-disable-file --\u0026gt; requirements Command(s) Package bash bash ps, kill procps usermod, groupmod passwd sed sed xargs findutils ","description":"Changing a user's username on Linux requires no processes be running under that user. This makes sense, but what if we only have that user accessible through a SSh connection?","id":11,"section":"posts","tags":["sysadmin"],"title":"Change Username Without Separate Session","uri":"https://johnhollowell.com/blog/posts/change-username-without-separate-session/"},{"content":"One of the most important parts of a working cluster is the interconnection and communication between nodes. 
While the networking side will not be covered now, a very important aspect will be: passwordless SSH.\nInter-node SSH The first task to getting easy access between nodes is ensuring SSH access between all the nodes.\nWhile not necessary, I recommend adding all your nodes to the /etc/hosts file on each node. For example, the /etc/hosts file might look like\n1 2 3 4 5 6 7 8 9 127.0.0.1 localhost # The following lines are desirable for IPv6 capable hosts ::1 ip6-localhost ip6-loopback fe00::0 ip6-localnet ff00::0 ip6-mcastprefix ff02::1 ip6-allnodes ff02::2 ip6-allrouters ff02::3 ip6-allhosts to which I would add (using the actual IPs of the nodes)\n1 2 3 4 192.168.0.11 node01 192.168.0.12 node02 192.168.0.13 node03 192.168.0.14 node04 Automate adding to your hosts files 1 2 3 4 5 6 7 8 9 for node in localhost node02 node03 node04; do ssh $node \u0026#34;cat | sudo tee -a /etc/hosts \u0026gt; /dev/null\u0026#34; \u0026lt;\u0026lt; EOF 192.168.0.11 node01 192.168.0.12 node02 192.168.0.13 node03 192.168.0.14 node04 EOF done After this is added to your hosts file on all your nodes, from any node you should be able to ssh node1 from any of them successfully after entering your password.\nNOTE: if you have not configured static IP addresses for your nodes, any changes to their IPs will require you changing the hosts file on all your nodes. Passwordless SSH To be able to SSH between nodes without the need for a password, you will need to create an SSH key. This will allow SSH to work in scripts and tools (MPI) without needing user interaction.\nFirst, we need to create a key. There are multiple standards of encryption you can use for SSH keys. The default is RSA, but it is generally considered to be less secure than modern standards. Therefore, these instructions will show how to create a ed25519 key. This will work on your cluster, but some (very) old systems may not support ED25519 keys (RSA keys will generally work everywhere even though they are less secure).\nTo create a key, use this command on one of your nodes:\n1 ssh-keygen -t ed25519 -a 100 -f ~/.ssh/id_ed25519 -C \u0026#34;Inter-node cluster ssh\u0026#34; This article does a good job of breaking down what all the arguments are used for.\nNext, we need our nodes to trust the key we just created. We\u0026rsquo;ll start with getting the current node to trust the key.\n1 ssh-copy-id -i ~/.ssh/id_ed25519 localhost NOTE: If you have already setup NFS with a shared home directory, you don\u0026rsquo;t need to do anything further; the key is accessible and trusted on all the nodes. Now we can just copy these files to all the other nodes so that they can use and will trust this key.\n1 2 3 4 5 for node in node02 node03 node04; do # list all the nodes that should get the key ssh-copy-id -i ~/.ssh/id_ed25519 $node # you will need to enter your password for this step scp ~/.ssh/id_ed25519 $node:.ssh/ ssh $node \u0026#34;chmod 600 ~/.ssh/id_ed25519\u0026#34; # ensure the key is locked down so SSH will accept it. done And to make all the nodes trust each other\u0026rsquo;s fingerprints\n1 2 3 for node in node02 node03 node04; do scp ~/.ssh/known_hosts $node:.ssh/ done We can check that we can SSH into all the nodes without having to enter a password:\n1 2 for node in node2 node3 node4; do ssh $node \u0026#34;hostname\u0026#34; ","description":"One of the most important parts of a working cluster is the interconnection and communication between nodes. 
While the networking side will not be covered now, a very important aspect will be: passwordless SSH.","id":12,"section":"posts","tags":["SSH","cluster","networks"],"title":"Cluster SSH","uri":"https://johnhollowell.com/blog/posts/cluster-ssh/"},{"content":" So you want to build a Raspberry Pi cluster.\nThe first thing to do is determine the size of a cluster you want to build. You can go with any number greater than one, but I\u0026rsquo;ve found that 4-8 is a good sweet spot between too few nodes to get a real feel of cluster operation and too many nodes to manage and maintain. For this and following posts, I will be assuming a cluster of 4 nodes (node01 to node04).\nHardware To run a cluster you also need some supporting hardware, where N is the number of nodes (examples given as links):\nN Raspberry Pi 4 N Micro SD Cards (16GB or more preferred) 1 gigabit ethernet switch (at least N+1 ports) OR router with N LAN ports (see the Networking section below) N short \u0026ldquo;patch\u0026rdquo; ethernet cables Power Supply (choose one) N USB C power supplies N/4 4-port USB power supplies with N USB C cables N/4 BitScope Quattro Raspberry Pi blades and power supply 1 USB Drive [optional] 1 4-slot case (with heatsinks) [optional] 1 power strip [optional] While you can use older models of the Pi if you already have them, using the most recent version will provide the most performance at the same price. Just make sure you get power cables that are compatible with your nodes.\nYou can also use larger RAM versions, but any amount of RAM should work for a minimally functional cluster. The more memory on your nodes, the larger problems they can solve and more performant they can be (caches for network and local storage and a reduction in swappiness).\nPut together the nodes If you got the BitScope Quattro for power or a case for your Pis, you will want to to get your Pis in place. This is also a great time to put on any heatsinks you have for your Pis.\nI would also recommend taking this time to decide the identity of each node and labeling them with a number or other identifier. I\u0026rsquo;ve decided to use numbers to identify my nodes, so I will use a marker or label to indicate which node is which number. This makes troubleshooting easier later on.\nConnect the wires Once your Pis are all ready to go, we need to connect them to power and network. It is useful to connect power and network cables in the order of the Pis so troubleshooting is easier when something goes wrong. Be sure to make sure all the cables are fully inserted.\nNetworking Connections For networking, you can take two paths:\nUse just a switch and connect the cluster to your home network Use a switch and/or a router to create a dedicated sub-network for your cluster. (You can use a switch to connect more nodes to your router if you have run out of ports on it) I\u0026rsquo;ll be doing the second option as it give better separation from my other devices and allows me to set private IP addresses for my nodes regardless the IPs already in use on my home network.\nRegardless the path your choose, you will need to connect your switch or router\u0026rsquo;s WAN port to your home network so your cluster can access the internet and you can access your nodes. (You could also have your cluster completely air-gapped and use static IPs on the nodes, but not being able to download applications and tools is in my opinion not worth the effort).\nSoftware For this cluster I will be using Ubuntu. 
Canonical ( the company behind Ubuntu) has done a great job of ensuring Ubuntu is stable on Raspberry Pis (with the help of software from the Raspberry Pi Foundation) and has a 64 bit version available (unlike Raspberry Pi OS as of the time of writing). I will be using 20.04, but the latest LTS version should be fine.\nThere is already a great tutorial on how to install Ubuntu on a Raspberry Pi. Make sure to select the latest LTS version with 64 bit support. Also, we have no need to install a desktop, so you can skip that step.\nConnecting to the nodes If you followed the above tutorial, you should have the IP address of all your nodes. If you can\u0026rsquo;t tell which IP goes to which node, try unplugging the network cables from all but one node, follow the instructions, and repeat for all the other nodes. If you are using a router for your cluster, make sure you are connected to its network (its WiFi or LAN port) and not your home network as the router will block connections from your home network into your cluster network. (if you want, you can create a port forward on your cluster router for port 22 to your so you can SSH into)\nOnce you know what node is what IP address, connect to the first node (which we will use as our head node). Try running ping 1.1.1.1 to ensure your node can connect to the internet. Then follow the cluster SSH guide to setup SSH between all your nodes.\nStatic IP addresses No matter if you have a dedicated cluster network or it is connected to your home network, you should configure static IP addresses for all your nodes so their addresses will not change accidentally in the future.\nPackages In future posts we will install needed packages for configuring our cluster operation, but below are some useful packages that can help with troubleshooting and analyzing cluster performance.\nDon\u0026rsquo;t forget to sudo apt update to make sure you have the latest package database.\nhtop iftop iotop dstat pv ","description":"The basics of getting a cluster of Raspberry Pis powered on and running. Full cluster configuration in later posts.","id":13,"section":"posts","tags":["cluster","networks","hardware"],"title":"Basic Cluster Setup","uri":"https://johnhollowell.com/blog/posts/basic-cluster-setup/"},{"content":"Clemson\u0026rsquo;s School of Computing (SoC) is the place at Clemson where Computer Science (CPSC), Computer Information Systems (CIS), and Digital Production Arts (DPA) are located. Other computing departments (like Computer Engineering) also use some of the SoC\u0026rsquo;s systems. Below are some useful tips and tools for quickly getting going in the SoC.\nAccess Servers The access servers are the way you can access all the SoC computers from off-campus (without having to use the VPN). You can SSH into them and then SSH into other computers through access (or anything else you can do through SSH). You can connect to the access servers using ssh \u0026lt;clemson_username\u0026gt;@access.computing.clemson.edu (or just ssh access.computing.clemson.edu if you computer\u0026rsquo;s username matches your Clemson username). When you connect, you will see a list of lab computers that you can then connect to by using their name (e.g. ssh babbage1). You can also use access2.computing.clemson.edu if the main access server is down or overloaded.\nIf you are on campus, you can directly access the lab computers without the need to go through the access server. 
Simply use ssh \u0026lt;computer_name\u0026gt;.computing.clemson.edu while on campus (or VPN) and you can directly connect to the machine.\nNOTE: There is a limit in place on the number of connections for each user connecting to the access server. I\u0026rsquo;ve found it to be 4 connections. If you need more connections, consider using both access and access2 or using SSH Multiplexing. Files on the lab computers All the lab computers share your home directory. This means that if you write a file on one computer, you can access it on any other lab computer. This also means your settings for most programs will be the same on all the computers.\nThis also means you can access these files from your own computer as a network drive. Check out these instructions for more information on the subject (use the linux share instructions).\nSSH between computers SSHing between the lab machines can be a bit of a pain when you have to enter your password every time. It also makes it harder to write scripts that use multiple lab computers to work on rendering a project or running some processing. However, if you set up SSH keys on the computers, it allows the lab machines to connect to each other without the need for a password. And since the lab computers share files, once SSH keys are set up on one system, they will work on all the systems.\nThe process of making the keys we will use is fairly straightforward. You can check out more information on what these commands do if you are interested.\n1 2 ssh-keygen -t ed25519 -a 100 -f ~/.ssh/id_ed25519 -C \u0026#34;School of Computing\u0026#34; ssh-copy-id -i ~/.ssh/id_ed25519 localhost This will generate a key for the computers to use, and \u0026ldquo;install\u0026rdquo; it so they will accept connections from that key. Since all the computers have the needed files due to the shared filesystem, all the computers now trust connections from all the other computers.\nSnapshot folder Oh no! You just deleted all the files for your assignment! Not to worry.\nYour home directory (/home/\u0026lt;username\u0026gt;/) on the SoC computers is backed up for just such a problem. Within every folder in your home directory is a hidden folder named .snapshot. It will not appear in any listing of directories, but if you cd into it, you can access all the different backups that are available. You can ls ~/.snapshot/ to see all the different dates that have backups (there are hourly, daily, and weekly backups). These backup files are read-only, so you will need to copy them back into your home directory to be able to edit them.\nTo access and recover your files, you can either do\n1 2 3 cd ~ cd .snapshot/daily.1234-56-78_0010/path/to/your/files/ cp very-important-file.txt ~/path/to/your/files/ OR\n1 2 3 cd ~/path/to/your/files/ cd .snapshot/daily.1234-56-78_0010 cp very-important-file.txt ~/path/to/your/files/ Teachers\u0026rsquo; Office Hours While it isn\u0026rsquo;t really a technology in the SoC, your teachers are one of the best resources for gaining knowledge and software development skills. After all, they aren\u0026rsquo;t called teachers for nothing.\nAll teachers are required to have office hours (and so are Teaching Assistants (TAs)). Make use of this time to get to know your teacher, ask questions, and learn more about topics that excite you.
It is also a good idea to start projects early (I\u0026rsquo;m not saying I ever did this, but it is what I should have done) so you can ask the teacher questions in office hours before everyone else starts to cram the assignment and office hours get busy.\nYOUR SUGGESTION HERE Is there something you really liked or have often used that you think I should add here or in another post? Get in contact with me and let me know!\n","description":"Clemson's School of Computing can be complicated. Here are some tips and tricks to get started quickly and make the most of the resources you have.","id":14,"section":"posts","tags":[null],"title":"Clemson SoC 101","uri":"https://johnhollowell.com/blog/posts/clemson-soc-101/"},{"content":" Clemson University’s computer labs store files across all the computers using network shares. You usually just access these shares on the lab machines, but you can also add the shares on your own computer as a network drive.\nThere are two main shares on campus: the campus share used by all the Windows (and Mac?) lab machines (e.g. in Cooper Library, Martin, etc.) and the School of Computing’s Linux systems. Both systems can be accessed in a similar way, but with different settings.\nTo access these network shares, you must either be on campus internet (WiFi or Ethernet) or have the Clemson VPN installed and activated on your device. See the CCIT guide for VPN access for more information. The following instructions assume you are using a Windows device to access the shares. Using the credentials as below, you can follow a guide for adding network drives on Mac OS X or Linux (Ubuntu)\nSteps Open File Explorer and go to \u0026ldquo;This PC\u0026rdquo;. Click \u0026ldquo;Map Network Drive\u0026rdquo; in the top ribbon. Choose what drive letter you want the share to appear as (it doesn’t matter what you choose for this; I used \u0026ldquo;Z\u0026rdquo; for this example) Linux Share Windows Share Enter \\\\neon.cs.clemson.edu\\home into the \u0026ldquo;folder\u0026rdquo; box. 5. Check both \"Reconnect as sign-in\" and \"Connect using different credentials\" so the network drive will automatically connect and you can use your Clemson credentials (rather than your local device’s username and password). Click \"Finish\". 6. Enter your University username (with @clemson.edu) and password. (You might have to click \"more choices\" in the login window to be able to enter a new username/password.) \u0026lt;img data-src=\u0026quot;https://johnhollowell.com/blog/posts/accessing-your-clemson-network-shares/neon_creds.png\u0026quot; alt=\u0026quot;example login credentials for neon.cs.clemson.edu\u0026quot; data-caption=\u0026quot;\u0026quot; src=\u0026quot;data:image/svg+xml,%0A%3Csvg xmlns='http://www.w3.org/2000/svg' width='50%25' height='' viewBox='0 0 24 24'%3E%3Cpath fill='none' d='M0 0h24v24H0V0z'/%3E%3Cpath fill='%23aaa' d='M19 3H5c-1.1 0-2 .9-2 2v14c0 1.1.9 2 2 2h14c1.1 0 2-.9 2-2V5c0-1.1-.9-2-2-2zm-1 16H6c-.55 0-1-.45-1-1V6c0-.55.45-1 1-1h12c.55 0 1 .45 1 1v12c0 .55-.45 1-1 1zm-4.44-6.19l-2.35 3.02-1.56-1.88c-.2-.25-.58-.24-.78.01l-1.74 2.23c-.26.33-.02.81.39.81h8.98c.41 0 .65-.47.4-.8l-2.55-3.39c-.19-.26-.59-.26-.79 0z'/%3E%3C/svg%3E\u0026quot; class=\u0026quot;lazyload\u0026quot; style=\u0026quot;width:50%;height:;\u0026quot;/\u0026gt; 7. Click \"OK\". Your School of Computing home directory should now appear under the drive letter you chose. NOTE: When adding new files via the network share, they are created with permissions defined by your umask. 
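If you are not sure what your umask will produce, it is quick to check from a shell on one of the Linux machines; the 0022 shown in the comment is just a common default, not a promise about how the lab machines are configured:\numask                      # e.g. 0022 means new files get 644 (rw-r--r--) and new directories get 755 (rwxr-xr-x)\ntouch testfile\nmkdir testdir\nls -ld testfile testdir    # confirm the permissions new items actually receive\nrm -r testfile testdir     # clean up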
You can use chmod xxx \u0026lt;file\u0026gt; to change a files permissions to xxx (view a chmod guide for more information on the chmod command) Enter \\\\home.clemson.edu\\\u0026lt;username\u0026gt; where \u0026lt;username\u0026gt; is your university username. 5. Check both \"Reconnect as sign-in\" and \"Connect using different credentials\" so the network drive will automatically connect and you can use your Clemson credentials (rather than your local device’s username and password). Click \"Finish\". 6. Enter your University username (without @clemson.edu) and password. (You might have to click \"more choices\" in the login window to be able to enter a new username/password.) \u0026lt;img data-src=\u0026quot;https://johnhollowell.com/blog/posts/accessing-your-clemson-network-shares/home_creds.png\u0026quot; alt=\u0026quot;example login credentials for home.clemson.edu\u0026quot; data-caption=\u0026quot;\u0026quot; src=\u0026quot;data:image/svg+xml,%0A%3Csvg xmlns='http://www.w3.org/2000/svg' width='50%25' height='' viewBox='0 0 24 24'%3E%3Cpath fill='none' d='M0 0h24v24H0V0z'/%3E%3Cpath fill='%23aaa' d='M19 3H5c-1.1 0-2 .9-2 2v14c0 1.1.9 2 2 2h14c1.1 0 2-.9 2-2V5c0-1.1-.9-2-2-2zm-1 16H6c-.55 0-1-.45-1-1V6c0-.55.45-1 1-1h12c.55 0 1 .45 1 1v12c0 .55-.45 1-1 1zm-4.44-6.19l-2.35 3.02-1.56-1.88c-.2-.25-.58-.24-.78.01l-1.74 2.23c-.26.33-.02.81.39.81h8.98c.41 0 .65-.47.4-.8l-2.55-3.39c-.19-.26-.59-.26-.79 0z'/%3E%3C/svg%3E\u0026quot; class=\u0026quot;lazyload\u0026quot; style=\u0026quot;width:50%;height:;\u0026quot;/\u0026gt; 7. Click \"OK\". Your Windows home directory should now appear under the drive letter you chose. You now have access to your files as if they were just another drive in your computer. Do note that these drives will be significantly slower than your actual computer drives due to higher latency and lower bandwidth.\n","description":"Clemson University’s computer labs store files across all the computers using network shares. You usually just access these shares on the lab machines, but you can also add the shares on your own computer as a network drive.","id":15,"section":"posts","tags":null,"title":"Accessing Your Clemson Network Shares","uri":"https://johnhollowell.com/blog/posts/accessing-your-clemson-network-shares/"},{"content":" Hey There! I\u0026rsquo;m John. I enjoy coding and problem solving. On the side I do some photography and videography work.\nCheck out my main website for more information about me and to get in contact.\n","description":"","id":16,"section":"","tags":null,"title":"About","uri":"https://johnhollowell.com/blog/about/"},{"content":" I\u0026rsquo;m at my extended family\u0026rsquo;s house way out in the middle of nowhere; barely enough cellular connection for an SMS, let alone trying to use any data.\nThey have DSL, but they are so far out that the signal is poor and it also is horrible speed. The fastest I saw while I was there was 700Kbps.\nWhile it is always a shock to go from over 100Mbps to under 1Mbps, I think that we are in an age where low bandwidth is not a show-stopper. Now obviously, downloading large files and games is a lot more tedious, I have found the \u0026ldquo;set everything to download overnight\u0026rdquo; method works quite well.\nI think there are three main reason why you can do more with less bandwidth than ever before.\nCompression and Codecs We have reached the point where processing power is so cheap, most of the time everything else is the limitation. 
We are glad to spend some power and time compressing data if it means we have more storage space on our devices or use less data. Website analysis tools will now complain if a webserver doesn\u0026rsquo;t compress its responses with at least gzip.\nWe are (slowly) starting to use new video and audio codecs that compress the crap out of the video/audio stream. Many devices are even starting to have highly performant hardware acceleration for these formats so it doesn\u0026rsquo;t even cause high load or power draw on mobile devices. Services like YouTube automatically convert content to many different qualities and have algorithms to pick the best quality that you can support.\nCaches, CDNs, and Apps Every web browser has a cache. Many even have several tiers of cache to give good hit/miss ratios and speed. If you are going to Facebook, you really should only ever need to receive the logo, most styles, and even some content once. This not only helps on slow connections, but even on fast connections an additional resource request can take a (relatively) long time to do an entire TCP and SSL handshake transaction.\nA further performance increase can be gained through websites\u0026rsquo; use of CDNs for their libraries and assets. If you are loading jQuery, FontAwesome, or bootstrap from local, you are doing it wrong. Pulling these assets from a CDN not only reduces the load on your server and the latency of the client accessing the resource, but allows caching these common resources between sites. If you visit a site using version x of the y library and then visit another site that uses the same version of y, you should be able to cache the first request of that resource and reuse it for any subsequent pages on any site. You can only do this if you are using a CDN (and the same one, but realistically most resources either have their own CDN or use one of the most common CDNs that everyone else uses).\nAdditionally, the use of site-specific apps (while annoying) allows the apps to only pull new content and \u0026ldquo;cache\u0026rdquo; all the resources needed to display the app. This means that, outside of app updates, almost all of the app\u0026rsquo;s traffic is the content you want to see (or ads, sigh).\nMobile Focused Pages Thanks to the horrible practices of the Cellular Companies, anything that is loaded on a cellular connection needs to be small to avoid using much data and to fit within limited bandwidth and even more limited data caps. While I have a great distaste for the stupidity of Cell carriers, their limitations have forced (er, encouraged) developments in efficient compression and transmission of pages (as well as a lot of bad practices in lazy loading and obfuscating in the name of minifying). Most sites will load smaller or more compressed assets when they detect they are on mobile platforms.\nCaveats While I did \u0026ldquo;survive\u0026rdquo; on the limited connection, I knew it was coming and was able to prepare a bit for it. I downloaded a couple of additional playlists on Spotify and synced a few episodes of TV to my phone from my Plex. However, I did not even use these additional downloads. I used the podcasts I had previously downloaded and even downloaded an additional episode while there. The ability in most apps to download content makes even a trickle of internet enough to slowly build up the content you want.\nI have also recently reset my laptop and had to download FFmpeg while there. It took a few minutes, but it didn\u0026rsquo;t fail.
I did want to do some complex computing while there, but since most of what I do is on other computers (servers, remote machines, etc) it was incredibly easy to do what I wanted to do through an SSH connection to a datacenter. This is cheating a little bit but really is not out of the ordinary; even on fast internet I would SSH out to do things I didn\u0026rsquo;t want or couldn\u0026rsquo;t do on my device (thanks Windows). This not not that different from devices like Chromebooks which almost entirely run remotely and require an internet connection to function (or function with all features).\nThis was also a family gathering, so I didn\u0026rsquo;t spend much time on the internet. I could quickly google the answer to win an argument and that was all I needed.\nConclusion Slow internet is still a pain, but I\u0026rsquo;ve grown to appreciate its limitations and work around them. Several trends in computing and content delivery in recent years have made slow internet more bearable. I won\u0026rsquo;t be giving up my high-speed internet any time soon, but slowing down and disconnecting a bit is a nice change of pace in this time where everything has to happen online.\n","description":"While it is always a shock to go from over 100Mbps to under 1Mbps, I think that we are in an age where low bandwidth is not a show-stopper.","id":17,"section":"posts","tags":["web","life","opinion"],"title":"A Trickle Is Better Than Nothing","uri":"https://johnhollowell.com/blog/posts/a-trickle-is-better-than-nothing/"},{"content":"2021. A new year; a new start.\nI\u0026rsquo;ve wanted to start a blog for a while, and I thought I might as well start it on the first of the year. I think I finally have enough things I want to talk about that a blog is worth the effort.\nWhat\u0026rsquo;s in a Name? So why the name \u0026ldquo;/dev/random\u0026rdquo;? Well, I\u0026rsquo;m a geek and this blog will be about anything. I don\u0026rsquo;t want to confine this blog to any one subject (including to just tech) and I want the entirety of the blog to be representative of that. It also give me the opportunity to have a punny subtitle, which I am always appreciative of.\nSo\u0026hellip; Why? This blog is mostly a place for me to put information for my future self and others. Don\u0026rsquo;t expect any deep, rambling prose. I\u0026rsquo;m not a spectacular writer and there are many things in my life that don\u0026rsquo;t merit blogging about. However, I have a very wide range of knowledge which I often will forget by the next time I need to use it. This gives me a way to record my experiences and experiments in a public place to which I can reference others. This blog is also an experiment, how meta is that?\nWhen can I get more of this great content? I would like to at least work on this blog every day. That doesn\u0026rsquo;t mean a new post every month; longer and more detailed posts will take me a bit longer. I might hold a post so a whole series can be release together. I might get bored and never create another post. Who knows?\n","description":"I've wanted to start a blog for a while, and I thought I might as well start it on the first of the year. I think I finally have enough things I want to talk about that a blog is worth the effort.","id":18,"section":"posts","tags":null,"title":"And So It Begins","uri":"https://johnhollowell.com/blog/posts/and-so-it-begins/"}] \ No newline at end of file +[{"content":" I\u0026rsquo;ve had my Steam Deck (256GB version) for a bit over 6 months now and I love it! 
Being a mostly keyboard-and-mouse gamer, that was a bit of a surprise to me.\nUnboxing Experience Opening the Steam Deck was an easy process. The packaging has fun Valve-y designs and contains the contents well without using unneeded extra packaging.\nSetup (or lack thereof) The initial setup process was as simple as possible. Sign into your Steam account and you are pretty much good to go!\nPlaying Games Playing games just works. With the Proton layer, most Windows games fully work. Even some games with Anti-Cheat software (I\u0026rsquo;ve tried Fall Guys) will work on the Steam Deck.\nDownloading Games Steam recently added a feature which is fantastic for the Deck. If there is a computer running Steam on the local network which already has a game downloaded, Steam will automatically download to your device from the other device, preventing the game from having to be downloaded from the internet and being able to use your much likely much faster LAN. Either machine can cancel this and force the downloading device to pull from the internet, but it is a great feature to be able to pull a game onto the Steam Deck at full 1 Gbps or 2.5 Gbps speed with the use of a USB-C ethernet adapter.\nPerformance It plays the games I want to play (even emulators of more recent consoles) at a stable 40-60 fps with medium-high quality settings. Check out proper reviewers for details, quantitative performance metrics. For more demanding games, I can play them at home by streaming them from my desktop. The Deck can play more demanding games if you lower quality settings and are okay with reducing your target framerate to 30-40 FPS, but it really isn\u0026rsquo;t well suited for it and performs much better in the lighter games at which it excels.\nBattery Life Admittedly, I\u0026rsquo;ve mostly used the Deck at home or in a location with easy access to power, so the battery life of the Deck has not been super important to me. However, for most of the light games that I play on it, the Deck gets 3-4 hours of play time, more than I really want (should) to play in a single sitting. Add an external battery pack and you can game for even longer.\nObviously with a triple A title, the battery is not going to last that long, the Deck will crank out so heat, and the fan will kick into high gear. But I rarely play these on the Deck anyway as I have a gaming desktop for these types of games.\nExploring the Controls This is by far the best part of the Steam Deck in my opinion. There are so many controls and Steam allows you to rebind and configure the inputs in just about any way you want. All the controls are comfortable for my medium-large hands to use. Especially for games which include Steam Input support (mostly just Valve games as of now), the ease of remapping inputs and creating complex input schemes is fantastic. The ability to tie in the gyroscope, back buttons, and touch pads makes it easy to setup a very fluid and natural control. And the ability to share controller profile means that the community (and you!) can share the tweaks that work the best for them and game authors can create custom controller mappings for their games. The control customization deserves its own entire post.\nThe main thing I appreciate is the back L4, L5, R4, and R5 buttons. Since basically no game uses them, they can be mapped to duplicate other buttons. I\u0026rsquo;ve found it useful on a lot of games to remap these buttons to ABXY so that I can use those buttons while using the right stick. 
For emulated games, I\u0026rsquo;ve really liked mapping the rear buttons to emulator commands, like the \u0026ldquo;Hotkey\u0026rdquo; button for RetroArch. This makes it much easier to perform emulator commands like creating a save point or pausing the game.\nUsing the Desktop Mode While I haven\u0026rsquo;t done a lot of different things in the Desktop Mode, there are times where it is invaluable. Since the Deck is a full Linux computer, you can go in and adjust files, add mod files to games, or just use it as a desktop. Desktop Mode also allows you to install applications not in the Steam store, like indie games, Discord, or anything that works on Linux. The read-only file system can be an issue, but most everything is a flatpak now, so that is less of an issue.\nFavorite Software These tools and apps greatly improve the experience or functionality of the Steam Deck.\nEmuDeck I\u0026rsquo;ll make a more in-depth review of it, but if you like playing old console or arcade games, EmuDeck sets up emulators for just about any console or system you can think of. It streamlines the setup of tools like RetroArch and EmulationStation to allow you to seamlessly play any old game for which you own (or can \u0026ldquo;acquire\u0026rdquo;) the ROMs. It can even add your games as game entries to Steam, allowing you to directly launch an emulated game from Steam.\nDecky Decky adds so many great plugins which can do everything from syncing your play status to Discord to adjusting detailed performance/clock/TDP tunables to controlling how fast/much the Deck can charge.\nHere are the plugins I currently have installed\nEmuDeck Hotkeys (preinstalled by EmuDeck) Network Info AutoSuspend PowerTools KDE Connect KDE Connect is a great tool for many uses, but I have found it the most useful for using another computer/phone as the keyboard for typing in complex passwords. It is super useful if you use a password manager. You can use a device which is already logged into your password manager, copy the credentials for whatever you are logging into on the team Deck and paste it via KDE Connect.\nFavorite Games These games are a combination of games I already enjoyed playing on desktop and games that where specifically reccommended to me to play on the Steam Deck. Enjoy them all!\nVampire Survivors Sky Rouge Fall Guys Kalimba A Short Hike The Future I will likely swap out for an OLED Steam Deck at some point, but more out of a desire to have the latest shiny thing and not due to the Steam Deck LCD being bad. I\u0026rsquo;ve really liked the Steam Deck and am happy I got it. It has opened a whole new world of simpler games to me and allowed me to come back to retro games I played as a kid.\n9/10 - Love it and the gaming it has given me. Would love it to somehow break the laws of physics and get more performance and longer battery without changing its size.\n","description":"","id":0,"section":"posts","tags":["hardware","gaming"],"title":"6 Months with the Steam Deck","uri":"https://johnhollowell.com/blog/posts/steamdeck/"},{"content":" The Tale Begins There I was, triaging a new issue that came in. A Linux VM running in the cloud was hanging when we started trying to run our workload on it. Huh, there was no output at all from the python script; it didn\u0026rsquo;t even create its log file, one of the first things it should do. 
Logging into the cloud instance, I looked around and noticed there was a python process running for the script we started, so the connection to the host and creating the python process at least worked. Well, since it didn\u0026rsquo;t work the first time, I killed the process and tried running the same command manually to see if there was an issue with the setup of the process. Aaaannnndddd it hung. But it doesn\u0026rsquo;t hang with the exact same NFS mount and AMI (root disk image) in a different cloud account we use.\nWell, this is interesting. Okay, let\u0026rsquo;s just look at the script we are running. Hung. Welp, I guess it is time for the good old turn-it-off-and-on-again fix. Now let\u0026rsquo;s look at the script. That seems fine. Let\u0026rsquo;s look at the python executable binary we are running. Hung. Uh, okay. Let\u0026rsquo;s check the script again. Hung. Well it looks like an NFS issue. Wireshark Time!\nAfter a bunch of test reads and write to the NFS mount with Wireshark slurping up packets, it looks like the client sends out read requests and the server never responds. The TCP connection retransmits the un-ACK\u0026rsquo;d packets until the TCP session times out, sends a RST, and sends the read request again.\nAfter inspecting the traffic in the AWS flow logs and in the cloud-to-on-prem firewall, it seems that all the traffic is correctly making it from the cloud client to the on-prem NFS server. So, what do we do now?\nAfter a bunch of additional tests, I ran a test of incrementally increasing the size of a file being written one byte at a time. The writes started to fail around 1300 bytes. Looking at the traffic in Wireshark, these write requests approached 1500 bytes. While both the server and client were using jumbo frames (9000 MTU), it is possible there is a 1500 MTU link somewhere between these two hosts.\nDiscovering the Path to a Fix Collaborating with our cloud operations team, we confirmed that the Direct Connect between the cloud and on-prem did have a 1500 MTU. However, this did not explain why the client/server could not use the standard Path MTU Discovery (PMTUD) to detect the smaller link and reduce the effective MTU to the lowest MTU along the path.\nPMTUD activates when a frame which is too large for a link is sent with the Don\u0026rsquo;t Fragment (DF) flag set. When network gear receives a frame too large for the MTU of the next hop, it will either fragment the packet or if the DF flag is set, return an ICMP error \u0026ldquo;Fragmentation Needed and Don\u0026rsquo;t Fragment was Set\u0026rdquo; packet to the sender and drop the packet. Testing in the other AWS account, this worked correctly and the TCP session downgraded to a 1500 MTU (technically the MSS was reduced to 1500 not the MTU, but that is a whole other topic). However for some reason in the original account, the session did not reduce to 1500. Comparing a packet capture from both accounts, I noticed that there was no ICMP error response in the broken account.\nAWSucks After much back-and-forth with our cloud ops team, we found that in the broken account there was an additional layer on top of the Direct Connect. The AWS Transit Gateway not only has a maximum MTU of 8500, but also does NOT return an ICMP \u0026ldquo;fragmentation but DF\u0026rdquo; error. 
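A quick way to sanity-check that hunch from the client is to send pings that are not allowed to fragment, sized just under and just over the suspected limit. The hostname here is illustrative, and the extra 28 bytes are the IP and ICMP headers riding on top of the payload:\nping -M do -s 1472 -c 3 nfs-server.example.com   # 1472 + 28 = 1500 bytes on the wire; should get replies on a 1500 MTU path\nping -M do -s 8972 -c 3 nfs-server.example.com   # 8972 + 28 = 9000 bytes; silently lost if a smaller-MTU hop drops it without an ICMP error\ntracepath nfs-server.example.com                 # reports the discovered pmtu hop by hop, when ICMP errors actually come back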
So the client or server sends a packet larger than the MTU of the Transit Gateway, the TG drops the packet without informing the sender of why the packet is being dropped, and the sender continues to retransmit the packet for which it has not received an ACK thinking it was just randomly dropped.\nFinding Another Way So PMTUD won\u0026rsquo;t work; great. And we can\u0026rsquo;t reduce the client\u0026rsquo;s MTU to 1500 as there are workloads running on it which must have jumbo frames. Thus began a flurry of research resulting in me learning of Linux\u0026rsquo;s Packet-Later PMTUD. Using the net.ipv4.tcp_mtu_probing kernel tunable, we can enable an MTU (really MSS) size discovery for TCP sessions.\nHow It Works When the sender sends a packet which is too large for a link in the path of an active TCP connection, the too-large packet will be dropped by the network and the sender will not receive an ACK from the receiver for that packet. The sender will then retransmit the data on an exponential backoff until the maximum retransit count is reached. The sender will then send a RST and try a new TCP session (which if tried with the same size packet will just continue to repeat).\nThe tcp_mtu_probing functionality takes over once the standard TCP retransmit limit is reached. With tcp_mtu_probing enabled, the kernel\u0026rsquo;s network stack splits the offending packet into net.ipv4.tcp_base_mss sized packets and sends those packets instead of the too-large packet. For further packets, the network stack will attempt to double the current packet limit until it again fails to ACK the packet. It then uses this new largest packet size for all future packets for the TCP session. Linux 4.1 improves on this functionality by using a binary search instead of multiple doubling of the MSS. The initial reduced packet size starts at tcp_base_mss and then binary searches for the largest functioning MSS between the tcp_base_mss and the MTU of the interface passing the traffic.\nA great article digging deeper into this is Linux and the strange case of the TCP black holes\nConclusion While the ideal solution would have been for AWS to fix their broken, non-compliant network infrastructure, it is unlikely they will ever fix this. Using a solution which is built into the Linux kernel which allows the continued use of Jumbo frames for cloud-local traffic which preventing traffic over the Transit Gateway from breaking due to large packets.\n","description":"A simple issue at work with cloud hosts not being able to access an NFS mount on-prem turn into a multi-month bug hunt which ended with finding a low MTU network path and an AWS \"feature\" (pronounced bug)","id":1,"section":"posts","tags":["cloud","AWS","networks"],"title":"Unraveling the Mystery of NFS Hangs, or How The (Hybrid) Cloud is a Pain","uri":"https://johnhollowell.com/blog/posts/aws-tg-mtu/"},{"content":" I\u0026rsquo;ll start off by saying I love my Framework laptop. The transition from my old 15\u0026quot; laptop to this 13\u0026quot; Framework has been a lot more seamless than I thought it would be. It has worked perfectly for everything I\u0026rsquo;ve put it through.\nMy Experience With My Framework Battery Life Even with the recently-replaced batter in my old laptop, my Framework has a much longer battery life. Likely a combination of both the battery and processor, I\u0026rsquo;m able to get many hours of even a demanding workload. 
I\u0026rsquo;m able to have Discord open in a video call for hours while having many other browser tabs or games running without the worry of where my charger is.\nLap-ability The one loss from moving from a 15\u0026quot; laptop to a 13\u0026quot; laptop is the lessened ability to use it effectively on my lap while connected to cords. The smaller size of the 13\u0026quot; means that it sits more between my legs rather than fully on top of my legs. This is normally fine, especially since the fan vents to the rear rather than to the right or left so my legs aren\u0026rsquo;t getting blasted with heat, but it does make having cables connected to the ports difficult and strains the cables\u0026rsquo; connectors.\nThankfully, I typically only need to have my charger connected to my laptop, so I found a solution. Since my charger is a type-c charger, I can just pop out one of my modules and directly connect the charger\u0026rsquo;s cable to the deeply-inset type-c port behind where the module would go. This means only the small cable is pressed against my leg and does not put any strain against the cable.\nCharging Fan One thing that has disappointed me about my Framework is the leaf blower it turns into when plugged in to charge (when the battery is discharged). I think a combination of moving from the \u0026ldquo;Better Battery\u0026rdquo; Windows power profile while on battery to \u0026ldquo;Best Performance\u0026rdquo; when plugged in and the extra heat from the high-speed charging capabilities means the fan kicks up to be quite loud when plugging in. I have not played around much with power profiles to try to reduce this, but it typically only lasts for a short time, and I almost always prefer the better performance over a bit of ignorable noise.\nPhysical Camera/Microphone Switches I didn\u0026rsquo;t think this would be a big thing, but it is really nice to be able to have confidence that at the hardware level, my mic and camera are not able to be accessed.\nE Cores As I have a wide, eclectic collection of software I run on a regular basis, I was pleased to not run into many issues with programs not properly understanding/scheduling with the efficiency cores on the 12th gen Intel processor. There are some tools (e.g. zstd) which don\u0026rsquo;t properly detect the cores to use. However this could be due to running some of these quirky tools in WSL and how some tools try to detect hyper-threading to schedule themselves only on physical cores.\nFOMO? Now that 13th gen Intel and AMD mainboards have come out for the 13\u0026quot; Framework, do I feel like I am missing out or should have waited? Not at all. If I had needed a laptop after the 13th gen came out, I would definitely have chosen to use the 13th gen mainboard, but I am happy with what I have. Especially since I rarely have a use case for a high-performance laptop, I\u0026rsquo;m very comfortable with my 12th gen.\nPart of the appeal of the Framework is that I don\u0026rsquo;t have to have as much of a fear of missing out. The new laptops all have the same hardware outside of the mainboard. If I want a 13th gen laptop, I can easily upgrade my existing laptop to the 13th gen and get a 12th gen computer to use as a server, media PC, etc.
And if I keep my laptop for long enough that the hardware is wearing out, I can replace the parts that are broken (or of which I want an improved version) and keep all the remaining parts, reducing the cost of repair and keeping still-good parts from ending up e-waste.\nAs for regrets getting the Framework rather than some other newer system, I have none. I have not stayed as up-to-date with the laptop scene since I\u0026rsquo;m not currently in need of a new one, but the systems that I have seen have not presented any better features or performance for my use cases. Some of the new Apple laptops have been interesting to follow, but I\u0026rsquo;m not a big fan of many aspects of Apple\u0026rsquo;s hardware and ecosystem and I still do come across some software that is not compiled for ARM (a big one being Windows). I love ARM and use it quite a bit in my homelab (mostly Raspberry Pis), but for my main system is just not quite universal enough for a daily driver.\nConclusion Overall, I\u0026rsquo;m very happy with my Framework and would absolutely recommend it to others. Yes, it is more expensive than another laptop with comparable specs, but the Framework\u0026rsquo;s build quality is supreme. If your use of laptops is more disposable, the Framework may not be for you (and that is okay), but I value the goals of the Framework and truly expect to get my money\u0026rsquo;s worth out of the repairability and modularity of the Framework.\n","description":"After living with the 13\" Framework laptop and releases of new specs for the 13\" and plans for the 16\", I've got some thoughts on my Framework","id":2,"section":"posts","tags":["hardware","life"],"title":"Framework Followup","uri":"https://johnhollowell.com/blog/posts/framework-followup/"},{"content":" I recently upgraded my laptop to a Framework laptop since my old trusty laptop\u0026rsquo;s screen cracked and a replacement screen cost as much as some new laptops. These are my initial impressions of the laptop\u0026rsquo;s build, performance, and usability.\nUse Case I have a bit of a minimal use case for my laptop. Since I have a powerful desktop and a fairly performant phone, I don\u0026rsquo;t need my laptop to be a do-everything device. If I need to do something that requires a lot of performance (gaming, heavy development builds, video transcode, etc), I will use my desktop. If I need to quickly do something, I will use the phone that is always in my pocket or on the desk next to me. My laptop fulfils three main functions: portable large-screen remote access to desktop, couch web-browsing and light development, and media consumption while on the road.\nDesktop Remote The main place I will install games and software, store some files, and do high-performance tasks is on my desktop. I often will need or want to do something on my desktop while not sitting at my desk. Be it from a few meters away on the couch or thousands of kilometers away, I will often remote into my desktop from my laptop. There are not really any specific requirements, but a large screen, enough CPU performance to decode the remote screen stream, and good enough networking to get the connection through. This is honestly the lowest performance need for a laptop, but having hardware decode for whatever remote solution would provide long battery life for this use case.\nCouch Computer This is the middle-of-the-road use case in terms of requirements. It is mostly web browsing, some light video consumption, and low-demand development/writing (like writing this blog). 
I use VS Code devcontainers for just about everything, so being able to run docker and VS Code well is a must. Mostly, this presents as having enough memory for the containers, VS Code (thanks memory-hungry electron), and all the extensions I typically use. Occasionally, having some extra performance is nice for building a new dev container (fast network to pull dependencies, fast CPU to decompress image layers and compile dependencies, and mostly fast disk to support fast installation of packages, creation of new layers, etc.), and it makes getting started contributing to a new project incredibly streamlined.\nOn-the-road System This is the most taxing use case that I have for my laptop. This is everything from the Couch Computer use case and more. Some video transcoding (compressing) of footage I\u0026rsquo;ve taken, some light (and not-so-light) gaming, and occasionally some heavy network traffic (using my laptop as a portable NAS or sneaker-net).\nThis is also the use case where the connectivity of the laptop is the most important. From hooking into projectors using HDMI, to needing ethernet for some network troubleshooting, to flashing a Raspberry Pi or reading images from an SD card, the most variability in how I interact with my computers is on the road. The ample expansion/connectivity modules make it easier to have the right connector where I want it, when I want it. Also, the ability to move my ports around means I will never have to do the awkward my-HDMI-is-on-the-wrong-side-for-this-podium dance again. Further, having 4 thunderbolt USB-C ports means that even if there is not an official module for what you want, you can easily connect a dongle or even make your own modules. Always in the data center? Make yourself an RS-232 serial port module for interacting with all the serial consoles on your hardware.\nDesktop Replacement As a bonus use case, I will very, very rarely use my laptop at my desk instead of my desktop. My work laptop usually sits on my desk, plugged into a thunderbolt dock connected to all my peripherals and monitors. Every once in a while, I might use this setup with my personal laptop if I was working on some project on my laptop that would be too cumbersome to move to my desktop but might benefit from the extra monitors and peripherals.\nBuild Form Factor The Framework is a 13.5\u0026quot; laptop with a 3:2 screen ratio. While I\u0026rsquo;m used to my previous laptop\u0026rsquo;s 15\u0026quot; form factor, the added height of the Framework\u0026rsquo;s screen and higher resolution maintain a good amount of screen real estate. It also provides a more compact body which is more portable and takes up less space on a desk. Weighing in at 4.4 lb, it isn\u0026rsquo;t a light laptop, but the incredibly sturdy chassis and zero deck flex on the keyboard are reason enough for the bit of extra weight.\nPower and Battery It uses Type-C (USB-PD) for charging via any of the 4 expansion ports when a USB-C expansion module is installed (or really you can directly connect to the type-c ports at the back of the expansion ports). This allows charging from either side of the laptop, which brings great versatility. While writing this, the idle power draw was ~15W at a medium-low screen brightness. Running a benchmark, the draw from the USB-C charger reached ~62W (on a 90W charger). Charging from 0% to ~80% while powered off averaged around 40W. 
Charging from ~85% to 100% averaged around a 30W draw (~10W to the battery and ~15W to the idle running system).\nKeyboard The keyboard is easy to type on with ample key spacing and a sensible key layout. I wrote this whole post on the Framework\u0026rsquo;s keyboard. The keys have good stabilization and have a comfortable travel distance. The palm rest areas beside the trackpad are large enough to use, and the keyboard is centered on the chassis so neither hand/wrist is more extended than the other. Overall, an easy keyboard on which to type.\nTrackpad Not much to say about the trackpad, and that is a good thing. The trackpad is a nice size: not too small to be useless and not too large to be cumbersome to use. It has a nice tactile click when pressed (which I rarely notice since I mostly tap-to-click rather than use the actual displacement button method of clicking) and a smooth surface which is easy to swipe across. The trackpad\u0026rsquo;s palm rejection while typing is very good, but the button still functions while the movement is disabled. If you place a lot of weight on the insides of your hands while typing, you may need to be careful not to push too hard on the trackpad. The typical multi-touch gestures work correctly and smoothly zoom, swipe, and the rest.\nSpeakers The speakers on the Framework have impressed me so far. I will use earphones/headphones over speakers most of the time, but the speakers are much better than my previous laptop\u0026rsquo;s speakers and are a nice, usable option. They are quite loud, and even at 100% there is no distortion, clipping, or chassis rattle. Although the speakers are down-firing at the front (user-facing side), they are on the angled bevel of the side, so even sitting atop a flat surface the speakers fire out and around the chassis to provide a well-balanced sound profile.\nPerformance CPU My Framework performs well. I got the i5 12th gen variant (i5-1240P, up to 4.4 GHz, 4+8 cores) as a low power yet still performant portable system. Following on the Desktop Remote section above, I very rarely need my laptop to be very performant. What I want most of the time is something that can boost to do a little bit of compute while mostly being a power-efficient system that can run web apps, remote desktop software, and YouTube. The system excels at these tasks. I\u0026rsquo;ll leave the hard numbers and comparisons to benchmark publications, but the system has done everything (within reason) I\u0026rsquo;ve thrown at it.\nMemory While it may seem basic, the ability to have socketed memory can\u0026rsquo;t be ignored in modern laptops. Being able to upgrade and/or expand your system\u0026rsquo;s memory down the line is one of the simplest ways to give an old machine a boost. However, a lot of new machines are coming out with soldered memory that can\u0026rsquo;t be upgraded, expanded, or replaced. The availability of 2 SODIMM slots for memory is a great feature for repairability and the longevity of the system.\nCooling and Fan One disappointing aspect of the Framework is its cooling system and fan. When idle, the fan is inaudible and the user-facing components stay cool. However, even when idle the bottom chassis panel gets slightly too warm to hold for a long time. While on a desk, this is not an issue, but when on a lap (where the lap in laptop comes from), the heat is a bit too much for bare skin contact, and going hand-held with one hand on the bottom for support is not comfortable. 
However, even when running full-tilt under a stress test, the top (keyboard, trackpad, and palm rest areas) stayed cool and comfortable.\nThe cooling fan, when going at full speed, is loud but does an adequate job of keeping the internals cool and preventing drastic thermal throttling. A concern I had heard from others was about the vent being in the hinge and the cooling capacity of the system while the screen is closed. After some tests, I found the hinge cover is shaped to direct the exhaust air out the bottom of the hinge, which gives enough airflow to keep the system cool.\nWiFi 6E While I currently don\u0026rsquo;t have any other wifi gear which supports 6E to test against, I believe 6 GHz is going to be super useful in the coming years and having a computer that already supports it is a great feature. And even if it didn\u0026rsquo;t have a 6E chip in it, the Framework\u0026rsquo;s wifi is socketed, which allows for future improvement.\nFrom what I can test, the Framework\u0026rsquo;s WiFi works well. It gets the maximum speed my Access Point (AP) supports and has very good range. I haven\u0026rsquo;t noticed any difference in reception between different orientations of the laptop, so the antenna placement seems to be the best it can be.\nUsability I/O The ability to select the I/O that your laptop has is one of the obvious usability features of the Framework. The ability to have up to 4 USB-C thunderbolt ports is impressive, and the various modules to adapt those ports into other common ports are fantastic. My favorite ability so far is just having a USB-C port on both sides of the laptop. When I was searching for a new laptop, few had a Type-C port and even fewer had at least one on both sides. The Framework works well with all the USB-C and thunderbolt docks and dongles that I have used with it.\nBattery Another great usability feature is the long battery life. The combination of an efficient processor and a high-capacity battery makes the Framework able to stay running for hours.\nSecurity, Privacy, and Webcam For security and privacy, the Framework has several great features. For signing in (on supported OSes), you can use the fingerprint sensor integrated into the power button for authentication. While my previous laptop had a Windows Hello capable camera, the fingerprint reader is just about as easy to use. The fingerprint reader works well.\nOn the webcam, the Framework has physical toggles to disable the webcam and disable the microphone (independently). The toggles have a nice red section visible when disabled, and the camera has a light when it is active. It is really nice to have physical switches for the cameras, and since I am using the fingerprint sensor for login (instead of the facial recognition of my previous laptop), I can leave the camera disabled most of the time. The camera is 1080p and does a good enough job with challenging situations like low light and high contrast environments.\nScreen The screen is a 2256 x 1504 (3:2) glossy screen. The extra screen real estate is nice for tasks that can make use of the extra vertical space, but media consumption, which is mostly 16:9 or wider, leaves unused space on the screen. The maximum brightness of the screen is quite bright and is easily visible in direct sunlight. The screen also has a light detector which can be used for automatic screen brightness adjustments. However, at least in Windows, the auto brightness mostly works well but causes a massive jump in brightness when adjusting to above ~50%. 
Due to the glossy, highly-reflective screen, bright sun from behind makes it hard to read the screen even at maximum brightness. I\u0026rsquo;m planning to investigate what matte screen films/protectors are available that I could use to make the screen less reflective. As I will very rarely use my laptop for color-accurate work, a matte screen would be better.\nWindows Install and Drivers One cautionary note revolves around the newer, less-used components in the Framework. I installed Windows 10 and out of the box, the trackpad and WiFi did not work. I had to use an Ethernet dongle (since I did not get the ethernet Framework module) to download the driver pack from Framework\u0026rsquo;s website. It did not automatically get the drivers from Windows Update like most other firmware/drivers. I also tried Ubuntu 22.04, and while it had fully functional WiFi and trackpad out of the box, it did not properly adjust the screen backlight based on the function keys (but was able to control the brightness manually using the OS settings slider).\nOverall Impressions Overall, I really like my Framework laptop so far. I did not think I would like the smaller size, but setting the display scaling to lower than the default of 200% (I\u0026rsquo;m testing between 175% and 150%) gives more than enough screen space for the tasks I need to do on my laptop. After writing this whole post on the keyboard both on a couch and at a desk, it is comfortable to type on and quick to pick up touch typing. It is small and portable while having good performance, battery longevity, and screen real estate. I wish it was a bit bigger as I like a laptop with a larger screen, but for the chassis size the screen is nearly 100% of the size of the laptop footprint. With an 11-in-1 USB dongle, it has as much or more connectivity than my desktop. It works flawlessly with thunderbolt docks (at least the ones I have tested). The first install of Windows 10 was a little painful having to install the driver bundle, but that is a small, one-time price to pay for a nice machine on an old OS.\n9.5/10. Would recommend.\n","description":"I recently upgraded my laptop to a Framework laptop since my old trusty laptop's screen cracked and a replacement screen cost as much as some new laptops. These are my initial impressions of the laptop's build, performance, and usability.","id":3,"section":"posts","tags":["hardware","life"],"title":"Framework First Impressions","uri":"https://johnhollowell.com/blog/posts/framework-first-impressions/"},{"content":" Trying to boot off an NVMe drive on older hardware can cause some issues. If you are running an older BIOS/UEFI, it may not have the needed drivers to understand how to talk to an NVMe drive. I ran into this exact issue when trying to boot my Dell R510 from an NVMe drive.\nTo boot from NVMe, I would need to use some shim which could be booted by the BIOS and which would chain-boot the actual OS on the NVMe.\nAttempt 1 - Clover The first method I attempted to use was the Clover Bootloader. Clover, while primarily used for Hackintoshes, can have NVMe support added and chain boot to another disk. I wanted to try this first as I would prefer an OS-indifferent solution that would continue to work no matter what I installed on the NVMe.\nI attempted to image Clover onto a USB drive and after several failed attempts, I finally formatted the USB as fat32 and just copy/pasted the contents to the drive. 
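(For reference, a minimal sketch of that format-and-copy step, assuming the USB stick shows up as /dev/sdX with a single partition and the Clover release has been extracted to ./CLOVER-extracted; the device name and paths here are illustrative, not the exact ones I used.)\nsudo mkfs.vfat -F 32 /dev/sdX1 # format the first partition as FAT32\nsudo mount /dev/sdX1 /mnt # mount the freshly formatted partition\nsudo cp -r ./CLOVER-extracted/EFI /mnt/ # copy the Clover EFI tree onto the stick\nsudo umount /mnt # flush writes and unmount\n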
I then followed instructions I found to enable NVMe compatibility by copying NvmExpressDxe.efi from EFI/CLOVER/drivers/off into EFI/CLOVER/drivers/BIOS/ and EFI/CLOVER/drivers/UEFI/. I then modified the EFI/CLOVER/config.plist file to automatically boot the NVMe drive after a 5 second pause.\nHowever, I could never get Clover to read this config.plist file. I tried placing it in other paths that were suggested by comments on the internet. I tried reverting to the original file and modifying one small value to ensure I had not messed up the file formatting. Still, I could not get Clover to read the config file and automatically boot from the NVMe drive. It would just remain at the boot selection menu, where I could manually select the NVMe to boot from, which would then work perfectly.\nAttempt 2 - Proxmox Boot Proxmox comes with the proxmox-boot-tool tool which is used to synchronize all the boot disks with the UEFI (ESP) partition. After giving up on Clover, I looked into proxmox-boot-tool and found I could just place an extra ESP partition on the USB drive and let proxmox-boot-tool keep it up-to-date and synced.\nRather than creating the correct partitions in the correct locations and of the right size, I just did a dd if=/dev/\u0026lt;root pool\u0026gt; of=/dev/\u0026lt;usb drive\u0026gt; bs=1M count=1024 to copy over the first 1 GB of the disk. I then used gparted to delete the main partition (leaving the BIOS boot and ESP partitions) and to give the remaining partitions new UUIDs. I then booted into Proxmox and ran proxmox-boot-tool format /dev/disk/by-uuid/\u0026lt;USB ESP partition UUID\u0026gt; --force followed by proxmox-boot-tool init /dev/disk/by-uuid/\u0026lt;USB ESP partition UUID\u0026gt;. Once that finished, I rebooted and the USB drive was used as the boot drive, which booted into the main Proxmox OS.\nConclusion I\u0026rsquo;ve had this in place for a few months now and it has worked perfectly through several updates to the boot cmdline options and kernel updates.\n","description":"My process of finding the best way to boot Proxmox off an NVMe drive in an old Dell R510","id":4,"section":"posts","tags":["sysadmin","proxmox"],"title":"NVMe Boot in Proxmox on Older BIOS","uri":"https://johnhollowell.com/blog/posts/nvme-proxmox-bios/"},{"content":" This was my first year attending All Things Open and my first in-person conference in several years.\nOverall, I really enjoyed the conference and would recommend others attend. It definitely helped that I already live in Raleigh so I didn\u0026rsquo;t have to travel to the conference, but even traveling to the conference would be a good experience.\nVenue The Raleigh conference center is a spacious venue. The paths to the session rooms are wide and easy to access. Most of the session rooms were large enough to fit everyone in the session. The conference center has ample surrounding parking and food options if the catered sandwiches don\u0026rsquo;t cover your appetite. The sponsor/vendor booths were set up in the atrium with plenty of room to interact with the vendors and still have room to walk past. All the areas were clean and tidy and the HVAC worked well in all but the smallest session room when it was packed.\nVendor Booths There were a lot of vendors spread around the whole atrium area. The conference added an interesting optional gamification element: the keynote sessions and each vendor booth had a code which, when entered into the conference app, would add points to your score. 
At the end of each day, the top scorers were randomly drawn for some very nice prizes.\nThere were a lot of really nice vendors present. From large companies like AWS, Microsoft, and Meta to small FOSS organizations like the FSF and OSI. Many vendors had great swag and welcoming representatives to talk to. While most of the companies were definitely focused on selling to enterprise customers, there were many that had personal/community versions of the software available and knowledgeable people to answer technical questions.\nSessions The session subjects covered a wide range, from enterprise-focused tracks to tracks focused on the open source community and collaboration. Some of the sessions were livestreamed for the virtual attendees (and thus recorded) while some were not recorded. I mostly attended the non-recorded sessions as I can watch the recorded sessions later, but all the sessions were well attended.\n","description":"My experience attending All Things Open for the first time","id":5,"section":"posts","tags":["life","ATO"],"title":"All Things Open 2022 Impressions","uri":"https://johnhollowell.com/blog/posts/ato22/"},{"content":" This December I graduated from Clemson University with my Masters degree in Computer Science. There were a lot of things I learned (obviously), but I think the most useful things I learned were not from the course material, but from my outside learning and interactions with peers and professors.\nProject Organization All my courses this semester were project based; while some of the grade for the course came from quizzes or homeworks, over 50% came from a semester-long project. My experience with these projects greatly differed based on the professor\u0026rsquo;s involvement and whether it was a group project.\nProblem Definition One of my main gripes for several of my project assignments was the complete lack of definition from the professor of what the project should look like. While there was some guidance on the general category of project that was required, there was little to no guidance on what specific topics were in scope. We submitted a project proposal, which would have helped with validating the acceptability of the project; however, the professors rarely commented on the validity of the proposal, let alone returned a grade for the proposal in a reasonable amount of time (read: before the end of the semester).\nThis is a perfect example of why requirements gathering and client interaction are such an important part of the development lifecycle. Knowing the plan for the project before spending development time ensures it is not wasted on something that is not the desired result. Having strict requirements allows the developer to precisely match the functionality to the desired outcomes.\nDeliverables Another important aspect which was mostly glossed over was deliverables. While each professor did say a deliverable of the project would be a final paper, specifics on the format, length, and content of the paper were lacking or never given. In addition, other deliverables were requested after the initial assignment was created, often at the very end of the semester. While this is not that uncommon in \u0026ldquo;real life,\u0026rdquo; often added requirements/deliverables will push back the project\u0026rsquo;s due date; not so with school projects, which must be done by the end of the semester.\nGroup Work Group work in school is almost always a complete mess. Over the course of my Masters degree, I\u0026rsquo;ve been in some okay groups and a lot of bad groups. 
I\u0026rsquo;ve been in groups where someone went completely AWOL for several months and only responded to messages when it was time for them to add their name to the deliverables. I\u0026rsquo;ve also been in some groups that were fantastic, where the team members understood that occasionally someone might have other stuff they needed to prioritize, but where everyone had, by the end of the semester, contributed equally. The best groups recognized the different skills of each member and assigned each task to the person that was most capable of completing it.\nGroup work in school is very different from working in teams in industry. In school your group grade is at best 10% based on your individual contribution. This leads some people to not contribute to the team and just accept a 90% as their max grade. In work, if you do not do the tasks assigned to you, no one is going to do your tasks and it is very apparent whose responsibility they are. Getting paid to do the work rather than paying to do the work also drastically changes the motivation and desire to complete the work.\nSelf Learning Most of the courses I took in my Masters program covered information I had learned previously either on my own or on the job. This meant that a large portion of the course material was redundant to me. However, these courses gave me the opportunity to deepen my knowledge of the covered material and utilize the professors as a resource to discover new corollary topics to learn on my own. This gave me the opportunity to learn at my own pace and follow the rabbit trails that I find interesting.\nI have also had courses that I had to teach myself; professors that don\u0026rsquo;t teach or teach wrong material. For one professor in particular, I had to stop going to class, as listening to her lectures decreased/confused my pre-existing knowledge of the topic.\nLab Teaching Assistantship I had a lot of fun being a Teaching Assistant (TA) for an undergrad lab section this past semester. I got to befriend some really cool students and get a taste of what it takes to teach. As I would like to teach at some point in the future, this was a fantastic opportunity to understand some of the requirements of teaching, experience the \u0026ldquo;joy\u0026rdquo; of grading, and deal with students\u0026rsquo; questions and concerns.\n","description":"This December I graduated from Clemson University with my Masters degree in Computer Science. There were a lot of things I learned (obviously), but I think the most useful things I learned were not from the course material, but from my outside learning and interactions with peers and professors.","id":6,"section":"posts","tags":["clemson"],"title":"Masters Degree Takeaways","uri":"https://johnhollowell.com/blog/posts/masters-degree-takeaways/"},{"content":" ZFS is a great filesystem that I use on most of my systems and it makes full-drive backups a breeze when I am refreshing hardware in my homelab. However, sometimes I want to backup to a non-ZFS system. These are the steps I use for fast and verified backups to a file on another computer.\nTL;DR: Combine the power of ZFS, zStandard, pv, and netcat to have a fast backup of a ZFS snapshot with verbose metrics of the process and progress.\nBackground If you already know about ZFS, snapshots, replication, and zStandard, feel free to skip this section. ZFS is a next-generation filesystem which supports a lot of great usability, data integrity, and performance features.\nOne of the most useful features is snapshots. 
Since ZFS is a copy-on-write (COW) filesystem, it can make a \u0026ldquo;copy\u0026rdquo; of an entire filesystem instantly as it just stores the current state and keeps blocks of data even if they later get updated/deleted. This is incredibly useful for backing up a system, as you can make a snapshot of the system instantly while it is running and then take the time to transfer the data.\nZFS can take a snapshot and zfs send the data in a stream that can be piped to a file, other commands, or a zfs receive on another host to load the datasets to that host\u0026rsquo;s storage and make the files available on the live filesystem. Receiving to another system has many benefits, but one major problem is the destination requires a ZFS pool mounted that has enough unused storage to receive all the incoming data. Sometimes this is not feasible, or even if the destination has a working pool it is not desired to mix in another filesystem with the existing data. In this case, sending to a file will store the entire send stream that can later be cat\u0026rsquo;d back to a zfs receive whenever desired.\nOne other tool used in this guide is zStandard. This is a newer compression library with great compression ratios while maintaining fairly high compression speed and incredibly fast decompression speed. I love zStandard and try to use it in everything. It has also had a large adoption increase in the last year or so with many other projects including zStandard compression support (ZFS, btrfs, tor, and Rsync to name a few).\nSetup There are two hosts: one using ZFS which will be backed up (src.example.com), and one host which will store the backup (dest.example.com). This destination host only needs enough storage space to store the (compressed) send stream.\nAll code is run on src.example.com unless otherwise noted. Making a Snapshot ZFS send streams only work on snapshots, so we need to create a snapshot of the current files and data to be able to send it. If you already have a up-to-date snapshot (maybe from automation), you can just uses that snapshot.\nTo create a snapshot, you either need to be root (run the following command with sudo), or have the snapshot ZFS permissions on the dataset. As we will be creating a recursive snapshot of all datasets, it is easier to just run commands as root.\nThe format of the snapshot command is\nzfs snap[shot] pool/datasetA/subdataset/thing1@snapshot-name.\nTo snapshot the \u0026ldquo;testing\u0026rdquo; dataset on my \u0026ldquo;tank\u0026rdquo; pool with the snapshot name \u0026ldquo;backup_2021-01-02_0304\u0026rdquo;, I would use either command\n1 2 zfs snap tank/testing@backup_2021-01-02_0304 zfs snapshot tank/testing@backup_2021-01-02_0304 To backup an entire pool, use zfs snap -r tank@full_backup which will recursively (-r) snapshot the given dataset and all datasets below it.\nDetermining the Size of the Send Now that we have our snapshot, it would be nice to know how much data we will be sending and storing for our backup. We can either get a (fairly accurate) estimate of the size of the send (quick) or get the exact size of the send. 
Unless you really need to know the exact size of the send, I recommend the fast method\nFast Size We can get an estimate of the size of a send by running the send with the dry-run flag (-n) in verbose mode (-v).\n1 zfs send -R -n -v tank@full_backup The last line should tell you the estimate of the size of the send.\nSlow Size If you really need the exact size of the send, you can use wc to get the total bytes being sent.\n1 zfs send -R tank@full_backup | wc -c If you want to see the speed that zfs can read the send data off your storage, you can use pv (you might need to install it) to see the size and speed.\n1 zfs send -R tank@full-backup | pv \u0026gt; /dev/null #fullsend Now that everything is prepared, we can actually send the data to the destination. We\u0026rsquo;ll start with the most basic form and add on some extra commands to add speed and metrics of the status of the send.\nIn the following examples, the zfs send command is used with the -R flag. This makes an \u0026ldquo;replication\u0026rdquo; send stream which can fully recreate the given snapshot from nothing. You can omit it if that is not the functionality you need.\n-R, \u0026ndash;replicate\nGenerate a replication stream package, which will replicate the specified file system, and all descendent file systems, up to the named snapshot. When received, all properties, snapshots, descendent file systems, and clones are preserved. 1\nBasic Send Getting bits from A to B is pretty easy. We can use SSH to send the data to the destination host and save it as a file2.\n1 zfs send -R tank@full-backup | ssh dest.example.com \u0026#34;cat \u0026gt; /path/to/saved/file.zfsnap\u0026#34; We can use the size we found earlier to get a rough progress bar. pv can take in the size of the stream and use it to determine an ETA and progress. It can take integer values with units of \u0026ldquo;k\u0026rdquo;, \u0026ldquo;m\u0026rdquo;, \u0026ldquo;g\u0026rdquo;, and \u0026ldquo;t\u0026rdquo;3.\nAssuming we have 24860300556 bytes (23.2GiB), we could use either of the following\n1 2 zfs send -R tank@full-backup | pv -s 24860300556 | ssh dest.example.com \u0026#34;cat \u0026gt; /path/to/saved/file.zfsnap\u0026#34; zfs send -R tank@full-backup | pv -s 24G | ssh dest.example.com \u0026#34;cat \u0026gt; /path/to/saved/file.zfsnap\u0026#34; If you have ZFS installed on the destination, you can check validate the send stream using zstreamdump4.\n1 2 # on dest.example.com cat /path/to/saved/file.zfsnap | zstreamdump While this works and is super reliable, it is inefficient in its data storage size and transport cost. The send stream is uncompressed on your destination and SSH can use significant CPU on low-power devices.\nThe next two solutions seek to solve these problems.\nCompression As long as you are not sending a raw or encrypted snapshot, there will be some amount of compressible data in the send stream. We can compress the send stream so it is (a bit) smaller on the destination\u0026rsquo;s storage.\nYou can compress on either the source or the destination, however compressing on the source means less data is transmitted over the network which usually is slower than the CPU needed for compression.\nWe\u0026rsquo;ll use zStandard due to its speed, compression ratio, and adaptable compression level.\nBasic Usage\n1 zfs send -R tank@full-backup | zstd -c | ssh dest.example.com \u0026#34;cat \u0026gt; /path/to/saved/file.zfsnap\u0026#34; ZStandard can also use an adaptive compression level. 
This means that if the network is slow and the compressor would otherwise be idle, it can increase the compression level and can also reduce the level if the network speeds up. This does mean that it can be a low compression ratio, but if reduced storage space is desired, the stream can be recompressed (e.g. zstd -d /path/to/saved/file.zfsnap.zst | zstd -T0 -19 /path/to/saved/file_smaller.zfsnap.zst). The minimum and maximum levels for the adaption can be set, but using just --adapt defaults to sane defaults (3 to 15).\nIt can also use multiple threads to fully utilize all the cores in the host. The number of threads can be specified or set to 0 to use the same number of threads as cores (-T0)5. It has a verbose mode (-v) as well which gives insight to the compression level and compression ratio of the stream.\n1 zfs send -R tank@full-backup | zstd -c -v -T0 --adapt=min=1,max=19 | ssh dest.example.com \u0026#34;cat \u0026gt; /path/to/saved/file.zfsnap.zst\u0026#34; pv can also be used to give progress and speed calculations (however, it seems that the verbose output of zstd conflicts with pv):\n1 zfs send -R tank@full-backup | pv -cN raw -s 24G | zstd -c -T0 --adapt=min=1,max=19 | pv -cN compressed | ssh dest.example.com \u0026#34;cat \u0026gt; /path/to/saved/file.zfsnap.zst\u0026#34; Local Send Only use the following across a network you trust (not the internet). This method sends data unencrypted. SSH takes a lot of processing power to encrypt data when sending large amounts of data through it. If we are on a secure network where we can sacrifice encryption for speed, we can use netcat instead of ssh.\nHowever, there is not server on the destination (unlike the SSH daemon), so we need to start a netcat server on the destination to listen (-l) for connections on a port (12345) and have it redirecting to the destination file (with pv showing us stats on the receiving side).\n1 2 # on dest.example.com nc -l 12345 | pv \u0026gt; /path/to/saved/file.zfsnap Now we can send it data to save to the file\n1 zfs send -R tank@full-backup | pv -s 24G | nc dest.example.com 12345 Putting it all together 1 2 # on dest.example.com nc -l 12345 | pv \u0026gt; /path/to/saved/file.zfsnap.zst 1 2 3 4 5 6 # on src.example.com snapName=\u0026#39;tank@full-backup\u0026#39; zfs snap -r ${snapName} sendSize=$(zfs send -v --dryrun -R ${snapName} | grep \u0026#34;total estimated\u0026#34; | sed -r \u0026#39;s@total estimated size is ([0-9\\.]+)(.).*@\\1\\n\\2@\u0026#39; | xargs printf \u0026#34;%.0f%s\u0026#34;) zfs send -R ${snapName} | pv -cN raw -s ${sendSize} | zstd -c -T0 --adapt=min=1,max=19 | pv -cN compressed | nc dest.example.com 12345 https://openzfs.github.io/openzfs-docs/man/8/zfs-send.8.html\u0026#160;\u0026#x21a9;\u0026#xfe0e;\nAs far as I know, the .zfsnap is not an official or commonly used extension. However, it helps me know what the file is, so I\u0026rsquo;ve used it here. Use whatever file name and extension you want.\u0026#160;\u0026#x21a9;\u0026#xfe0e;\nhttps://linux.die.net/man/1/pv\u0026#160;\u0026#x21a9;\u0026#xfe0e;\nhttps://linux.die.net/man/8/zstreamdump\u0026#160;\u0026#x21a9;\u0026#xfe0e;\nThe documentation for zStandard notes that using the -T flag with --adapt can cause the level to get stuck low. If you have problems with the compression level getting stuck at a low value, try removing the threads flag.\u0026#160;\u0026#x21a9;\u0026#xfe0e;\n","description":"ZFS is a great filesystem which I use on most of my systems and it makes full-drive backups a breeze. 
However, sometimes I want to backup to a non-ZFS system. These are the steps I use for fast and verified backups to a file on another computer.","id":7,"section":"posts","tags":["ZFS","backup","sysadmin"],"title":"ZFS Backups to Files","uri":"https://johnhollowell.com/blog/posts/zfs-backups-to-files/"},{"content":"I first noticed Kernel Same-page Merging (KSM) while working with Virtual Machines (VMs) under KVM (in Proxmox VE).\nKSM is a way of reducing physical memory usage by using one physical page of memory for all duplicate copies of that page. It does this by periodically scanning through memory, finding duplicate pages, and de-duplicating them via virtual memory. It is an extension of how the kernel shares pages between fork()\u0026lsquo;ed processes and uses many of the same methods of sharing memory. KSM is most often used with virtualization to de-duplicate memory used by guest Operating Systems (OSs), but can be used for any page of memory which the program registers with KSM to scan. \u0026ldquo;Red Hat found that thanks to KSM, KVM can run as many as 52 Windows XP VMs with 1 GB of RAM each on a server with just 16 GB of RAM.\u0026rdquo;1\nVirtual Memory Background To fully understand how KSM works, a (at least) basic understanding of how virtual memory works is required.\nTo prevent programs from having to know where every other process on the computer is using memory, the kernel (the all-powerful dictator of the OS) tells each process it has memory starting at address 0. It then keeps a record of where in actual (physical) memory each block (page) of the virtual memory is located.\nIt uses this mapping to translate memory addresses each time the process reads or writes to memory.\n© Computer History Museum This virtual memory also allows things like memory-mapped files on disk and Copy-On-Write (COW) pages. When a process clones (forks) itself, it doesn\u0026rsquo;t have to make a copy of all the memory it was using. It simply marks each page as COW. Each process can read from its memory with both virtual addresses pointing to the same physical page (now marked COW), but when either attempts to write to memory, the existing physical page is left in place (so the other process can still use it) and a new physical page is allocated and mapped to the writer\u0026rsquo;s virtual memory. This allows pages of memory that are not changed in forked processes to use no additional memory.\nThe same process is used by KSM: it finds duplicate pages in the memory ranges registered with it, marks one of the physical pages as COW, and frees the other physical pages after mapping all the virtual pages to the one physical page.\nhttps://kernelnewbies.org/Linux_2_6_32#Kernel_Samepage_Merging_.28memory_deduplication.29\u0026#160;\u0026#x21a9;\u0026#xfe0e;\n","description":"Today I Learned about Kernel Same-page Merging (KSM)","id":8,"section":"posts","tags":["Linux","memory"],"title":"TIL: Kernel Same-page Merging (KSM)","uri":"https://johnhollowell.com/blog/posts/til-ksm/"},{"content":" Please read A Trickle Is Better Than Nothing before reading this post. I just got over having no internet at my apartment for over a week. I was gone a portion of the week, but it was still very inconvenient. Working remotely doesn\u0026rsquo;t help, as to get paid I need to have an internet connection (but not necessarily a fast connection).\nWorking Around It While I could have used cellular data to carry me through, I had already used a significant portion of my data cap on various travels this summer. 
I ended up just going onto campus and working from my laptop in a computer lab.\nWhile on campus (with its wonderful gigabit symmetrical internet), I downloaded some videos from my YouTube Watch Later playlist so I could have some videos to watch at home. I tried to do as much pre-downloading of content I could so I would have it accessible at home.\nMissing the Trickle So I had everything downloaded and I was fine, right? Wrong.\nI do more with my life than just watching YouTube. I play games, I browse social media, and (most frustratingly in this situation) I code. It is impossible to stay up-to-date on PRs and Issues without being able to connect to the internet. While I could have looked at the GutHub website on my phone, I have a lot of nice tooling around Issues/PRs that is on my desktop.\nI also wanted to open some PRs on some FOSS projects I want to improve. I couldn\u0026rsquo;t do a git clone, I couldn\u0026rsquo;t download the devcontainers needed for the new project and language, I couldn\u0026rsquo;t easily research how to do what I wanted in the documentation on StackOverflow. This stopped me dead in my tracks and forced me to either make a trip back to campus to get internet or use the limited cellular data I had left to clone the entire repo and pull all the require container layers.\nWhat If How could it have been if I had at least a small amount of internet? I would still utilize the high-speed connection at campus to download some content to watch, but I would have still been able to pull up the YT page for the video to see comments and the description and to comment and like myself. While it would have taken a while, I could have left the repo and containers to download while I was watching something or making dinner or overnight. I could have refreshed my Issues/PRs and get any updates on their status and checks. I could have seen that a new video was released by my favorite channel and either queue the video to download or go somewhere with internet to quickly download it.\nOverall, I am very grateful for the internet I have. This just makes me appreciate the internet all the more with its redundancy and high availability and goes to prove that the last mile is really the most vulnerable segment of any network or connection.\n","description":"I just got over having no internet at my apartment for over a week, and I can confirm that a trickle is better than nothing.","id":9,"section":"posts","tags":["web"],"title":"Nothing Is Definitely Worse Than a Trickle","uri":"https://johnhollowell.com/blog/posts/nothing-is-definitely-worse-than-a-trickle/"},{"content":" Setting up and maintaining a development environment is hard, especially when you need to destructively test features or libraries. Especially for contributing to a new project, you don\u0026rsquo;t know everything that is needed. Sometimes the install/development instructions assume some base tools or packages that are not included in your development environment of choice.\nIn come devcontainers. Rather than having to search through the README for a project you are wanting to contribute to, installing several packages onto your machine, and troubleshooting when it doesn\u0026rsquo;t work, you can simply open the repository as a devcontainer and you are ready to start contributing. Have a project that requires several separate services (databases, middleware/api server, etc.)? 
Create a devcontainer using docker-compose and your development environment can launch an entire suite of containers exactly how you need them.\nSetup Install Docker To be able to use containers, we need a container manager: Docker.\nTo get Docker installed, simply follow their instructions\nInstall VS Code To get Visual Studio Code (VS Code) installed, simply follow their instructions\nAdd container remote extension Within VS Code, install the Remote - Containers extension\nClick the Extensions sidebar (or use the \u0026ldquo;Ctrl + Shift + X\u0026rdquo; shortcut) Search for ms-vscode-remote.remote-containers Click \u0026ldquo;Install\u0026rdquo; Test It Out Now that you are ready to use a devcontainer, it is time to test it out!\nYou can grab this blog and use it as the devcontainer to play with. Click on the bottom left in VS Code on the green arrows, find the Container remote section, and select \u0026ldquo;Clone Repository in Container Volume\u0026hellip;\u0026rdquo;, enter https://github.com/jhollowe/blog and hit enter.\nAfter a minute or so of downloading and building your development container, VS Code will be fully functional. You can use the included tasks (Terminal \u0026gt; Run Task\u0026hellip; \u0026gt; Serve) to build and serve the blog. The devcontainer includes everything needed to build the blog and run VS Code. VS Code will even pull in common configuration for tools like Git and SSH.\nModes There are several \u0026ldquo;modes\u0026rdquo; for how devcontainers store your files, each with its own benefits and drawbacks.\n\u0026ldquo;mode\u0026rdquo; Pros Cons container volume * fast\n* fully self-contained environment * hard to access files from outside container mounting a directory * easy to get files in and out\n* allows stateful local files * slow file I/O\n* add/edits/deletes affect the source directory cloning a directory * as fast as a container volume\n* easy to get files into container\n* edits/deletes do not affect the source directory * hard to get files out of container ","description":"Setting up and maintaining a development environment is hard, especially when you need to destructively test features or libraries.","id":10,"section":"posts","tags":["development","containers"],"title":"Getting Started With Devcontainers","uri":"https://johnhollowell.com/blog/posts/getting-started-with-devcontainers/"},{"content":"For organizations with complex Active Directory (AD) environments, AD forests can allow flexibility in the management and organization of objects.\nBasically, an AD forest allows multiple domains and trees of domains (subdomains) to access and have a shared configuration while still having separate domains with separate host servers.\nThey allow domains to trust and access each other while still maintaining separation and borders. 
I\u0026rsquo;ve seen this used to allow corporate and client domains to communicate or to have a development domain tree that trust and can cross-talk with the production domain tree while still being separate (this is less common as dev domains are usually just subdomains within the production tree).\nResources\nhttps://en.wikipedia.org/wiki/Active_Directory#Forests,_trees,_and_domains https://ipwithease.com/what-is-a-forest-in-active-directory/ https://www.varonis.com/blog/active-directory-forest/ ","description":"Today I Learned about Active Directory Forests","id":11,"section":"posts","tags":["Active Directory"],"title":"TIL: AD Forests","uri":"https://johnhollowell.com/blog/posts/til-ad-forests/"},{"content":" Changing a user\u0026rsquo;s username on Linux requires no processes be running under that user. This makes sense, but what if we only have that user accessible through a SSh connection? What if we don\u0026rsquo;t want to allow external access to the root account? What if the root account doesn\u0026rsquo;t have a password?\nBackground I was recently spinning up a bunch of Raspberry Pis running Ubuntu 20.04 and some VPSes also running Ubuntu 20.04. I wanted to change the username on these nodes, but only really had access to the ubuntu (sudo) account. While I know I could use a cloud-init file to create a user exactly how I want (more on that in a future post), I didn\u0026rsquo;t want to re-flash the nodes and was not able to add a cloud-init file before boot on the VPSes.\nThe Process Getting The Commands To Run So we can\u0026rsquo;t change the username of a user with running processes, but a SSH session and a bash shell both run under my user whenever I\u0026rsquo;m connected.\nThe main problem is executing a command from a user (and sudo-ing to root) while not having that user have a process running.\nUsing either of the commands below allows a command to be run as the root user which will continue running\n1 2 3 4 5 # interactive shell sudo tmux # non-interactive command sudo -s -- sh -c \u0026#34;nohup \u0026lt;command\u0026gt; \u0026amp;\u0026#34; Now that we can have a command running as root independent of the initiating user, we need to kill everything of the user so we can run usermod commands without difficulty. We kill the processes and wait a couple seconds for them all to terminate. Then we can run whatever commands we need.\n1 ps -o pid= -u \u0026lt;current_username\u0026gt; | xargs kill \u0026amp;\u0026amp; sleep 2 \u0026amp;\u0026amp; \u0026lt;command\u0026gt; What This Command Does ps lists the processes running on the system -o pid= selects only the process ID (pid) and does not create a header for the column (=) -u \u0026lt;username\u0026gt; selects only the processes running under \u0026lt;username\u0026gt; | takes the output of the previous command (ps) and makes it the input of the following command (xargs) xargs takes a line separated list (can change the separator) and turns them into arguments for the following command (-r tells it to do nothing if its input is empty) kill takes a pid (or list of pids) and terminates the process. While kill can send different signals to processes, this uses the default signal (TERM). 
\u0026amp;\u0026amp; runs the following command if the preceding command exited successfully (exit code 0) sleep 2 wait 2 seconds for the killed processes to terminate Now, we can get to actually changing the username!\nChanging The Username Now that we can run commands as root without our user running processes, we can proceed to change the username and other related tasks.\nThese commands assume you are running as root. If not, you may need to insert some sudo\u0026rsquo;s as necessary\n1 2 3 4 5 6 7 8 9 10 11 # change the user\u0026#39;s username usermod -l \u0026lt;new_username\u0026gt; \u0026lt;current_username\u0026gt; # move the user\u0026#39;s home directory usermod -d /home/\u0026lt;new_username\u0026gt; -m \u0026lt;new_username\u0026gt; # change user\u0026#39;s group name groupmod -n \u0026lt;new_username\u0026gt; \u0026lt;current_username\u0026gt; # replace username in all sudoers files (DANGER!) sed -i.bak \u0026#39;s/\u0026lt;current_username\u0026gt;/\u0026lt;new_username\u0026gt;/g\u0026#39; /etc/sudoers for f in /etc/sudoers.d/*; do sed -i.bak \u0026#39;s/\u0026lt;current_username\u0026gt;/\u0026lt;new_username\u0026gt;/g\u0026#39; $f done Putting it all together When we put it all together (with some supporting script), we get change-username.sh as seen below:\n1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 #!/bin/bash currentUser=$1 newUser=$2 if [ $# -lt 2 ]; then printf \u0026#34;Usage:\\n\\t$0 \u0026lt;current_username\u0026gt; \u0026lt;new_username\u0026gt; [new_home_dir_path]\\n\u0026#34; exit 1 fi if [ $(id -u) -ne 0 ];then echo \u0026#34;Root permission needed for modifying users. 
Can not continue.\u0026#34; exit 2 fi newHome=\u0026#34;/home/$newUser\u0026#34; if [ $# == 3 ];then newHome=$3 fi echo \u0026#34;Changing $currentUser to $newUser\u0026#34; echo echo \u0026#34;Running this script has the possibility to break sudo (sudoers file(s)) and WILL kill all processes owned by $currentUser\u0026#34; echo \u0026#34;$currentUser will be logged out and will need to reconnect as $newUser\u0026#34; read -n1 -s -r -p $\u0026#39;Continue [Y/n]?\\n\u0026#39; key if [ $key != \u0026#39;\u0026#39; -a $key != \u0026#39;y\u0026#39; -a $key != \u0026#39;Y\u0026#39; ]; then echo \u0026#34;Stopping; no files changed\u0026#34; exit 2 fi # put the main script in /tmp so the user\u0026#39;s home directory can be safely moved tmpFile=$(mktemp) cat \u0026gt; $tmpFile \u0026lt;\u0026lt; EOF #!/bin/bash shopt -s extglob # terminate (nicely) any process owned by $currentUser ps -o pid= -u $currentUser | xargs -r kill # wait for all processes to terminate sleep 2 # forcibly kill any processes that have not already terminated ps -o pid= -u $currentUser | xargs -r kill -s KILL # change the user\u0026#39;s username usermod -l \u0026#34;$newUser\u0026#34; \u0026#34;$currentUser\u0026#34; # move the user\u0026#39;s home directory usermod -d \u0026#34;$newHome\u0026#34; -m \u0026#34;$newUser\u0026#34; # change user\u0026#39;s group name groupmod -n \u0026#34;$newUser\u0026#34; \u0026#34;$currentUser\u0026#34; # replace username in all sudoers files sed -i.bak \u0026#39;s/\u0026#39;$currentUser\u0026#39;/\u0026#39;$newUser\u0026#39;/g\u0026#39; /etc/sudoers for f in /etc/sudoers.d/!(*.bak); do echo \u0026#34;editing \u0026#39;\\$f\u0026#39;\u0026#34; sed -i.bak \u0026#39;s/\u0026#39;$currentUser\u0026#39;/\u0026#39;$newUser\u0026#39;/g\u0026#39; \\$f # TODO fix $f not getting the file path for some reason done EOF echo \u0026#34;Putting script into $tmpFile and running\u0026#34; chmod 777 $tmpFile sudo -s -- bash -c \u0026#34;nohup $tmpFile \u0026gt;/dev/null \u0026amp;\u0026#34; ``` \u0026lt;!-- markdownlint-disable-file --\u0026gt; requirements Command(s) Package bash bash ps, kill procps usermod, groupmod passwd sed sed xargs findutils ","description":"Changing a user's username on Linux requires no processes be running under that user. This makes sense, but what if we only have that user accessible through a SSh connection?","id":12,"section":"posts","tags":["sysadmin"],"title":"Change Username Without Separate Session","uri":"https://johnhollowell.com/blog/posts/change-username-without-separate-session/"},{"content":"One of the most important parts of a working cluster is the interconnection and communication between nodes. While the networking side will not be covered now, a very important aspect will be: passwordless SSH.\nInter-node SSH The first task to getting easy access between nodes is ensuring SSH access between all the nodes.\nWhile not necessary, I recommend adding all your nodes to the /etc/hosts file on each node. 
For example, the /etc/hosts file might look like\n1 2 3 4 5 6 7 8 9 127.0.0.1 localhost # The following lines are desirable for IPv6 capable hosts ::1 ip6-localhost ip6-loopback fe00::0 ip6-localnet ff00::0 ip6-mcastprefix ff02::1 ip6-allnodes ff02::2 ip6-allrouters ff02::3 ip6-allhosts to which I would add (using the actual IPs of the nodes)\n1 2 3 4 192.168.0.11 node01 192.168.0.12 node02 192.168.0.13 node03 192.168.0.14 node04 Automate adding to your hosts files 1 2 3 4 5 6 7 8 9 for node in localhost node02 node03 node04; do ssh $node \u0026#34;cat | sudo tee -a /etc/hosts \u0026gt; /dev/null\u0026#34; \u0026lt;\u0026lt; EOF 192.168.0.11 node01 192.168.0.12 node02 192.168.0.13 node03 192.168.0.14 node04 EOF done After this is added to your hosts file on all your nodes, from any node you should be able to ssh node1 from any of them successfully after entering your password.\nNOTE: if you have not configured static IP addresses for your nodes, any changes to their IPs will require you changing the hosts file on all your nodes. Passwordless SSH To be able to SSH between nodes without the need for a password, you will need to create an SSH key. This will allow SSH to work in scripts and tools (MPI) without needing user interaction.\nFirst, we need to create a key. There are multiple standards of encryption you can use for SSH keys. The default is RSA, but it is generally considered to be less secure than modern standards. Therefore, these instructions will show how to create a ed25519 key. This will work on your cluster, but some (very) old systems may not support ED25519 keys (RSA keys will generally work everywhere even though they are less secure).\nTo create a key, use this command on one of your nodes:\n1 ssh-keygen -t ed25519 -a 100 -f ~/.ssh/id_ed25519 -C \u0026#34;Inter-node cluster ssh\u0026#34; This article does a good job of breaking down what all the arguments are used for.\nNext, we need our nodes to trust the key we just created. We\u0026rsquo;ll start with getting the current node to trust the key.\n1 ssh-copy-id -i ~/.ssh/id_ed25519 localhost NOTE: If you have already setup NFS with a shared home directory, you don\u0026rsquo;t need to do anything further; the key is accessible and trusted on all the nodes. Now we can just copy these files to all the other nodes so that they can use and will trust this key.\n1 2 3 4 5 for node in node02 node03 node04; do # list all the nodes that should get the key ssh-copy-id -i ~/.ssh/id_ed25519 $node # you will need to enter your password for this step scp ~/.ssh/id_ed25519 $node:.ssh/ ssh $node \u0026#34;chmod 600 ~/.ssh/id_ed25519\u0026#34; # ensure the key is locked down so SSH will accept it. done And to make all the nodes trust each other\u0026rsquo;s fingerprints\n1 2 3 for node in node02 node03 node04; do scp ~/.ssh/known_hosts $node:.ssh/ done We can check that we can SSH into all the nodes without having to enter a password:\n1 2 for node in node2 node3 node4; do ssh $node \u0026#34;hostname\u0026#34; ","description":"One of the most important parts of a working cluster is the interconnection and communication between nodes. While the networking side will not be covered now, a very important aspect will be: passwordless SSH.","id":13,"section":"posts","tags":["SSH","cluster","networks"],"title":"Cluster SSH","uri":"https://johnhollowell.com/blog/posts/cluster-ssh/"},{"content":" So you want to build a Raspberry Pi cluster.\nThe first thing to do is determine the size of a cluster you want to build. 
You can go with any number greater than one, but I\u0026rsquo;ve found that 4-8 is a good sweet spot between too few nodes to get a real feel of cluster operation and too many nodes to manage and maintain. For this and following posts, I will be assuming a cluster of 4 nodes (node01 to node04).\nHardware To run a cluster you also need some supporting hardware, where N is the number of nodes (examples given as links):\nN Raspberry Pi 4 N Micro SD Cards (16GB or more preferred) 1 gigabit ethernet switch (at least N+1 ports) OR router with N LAN ports (see the Networking section below) N short \u0026ldquo;patch\u0026rdquo; ethernet cables Power Supply (choose one) N USB C power supplies N/4 4-port USB power supplies with N USB C cables N/4 BitScope Quattro Raspberry Pi blades and power supply 1 USB Drive [optional] 1 4-slot case (with heatsinks) [optional] 1 power strip [optional] While you can use older models of the Pi if you already have them, using the most recent version will provide the most performance at the same price. Just make sure you get power cables that are compatible with your nodes.\nYou can also use larger RAM versions, but any amount of RAM should work for a minimally functional cluster. The more memory on your nodes, the larger problems they can solve and more performant they can be (caches for network and local storage and a reduction in swappiness).\nPut together the nodes If you got the BitScope Quattro for power or a case for your Pis, you will want to to get your Pis in place. This is also a great time to put on any heatsinks you have for your Pis.\nI would also recommend taking this time to decide the identity of each node and labeling them with a number or other identifier. I\u0026rsquo;ve decided to use numbers to identify my nodes, so I will use a marker or label to indicate which node is which number. This makes troubleshooting easier later on.\nConnect the wires Once your Pis are all ready to go, we need to connect them to power and network. It is useful to connect power and network cables in the order of the Pis so troubleshooting is easier when something goes wrong. Be sure to make sure all the cables are fully inserted.\nNetworking Connections For networking, you can take two paths:\nUse just a switch and connect the cluster to your home network Use a switch and/or a router to create a dedicated sub-network for your cluster. (You can use a switch to connect more nodes to your router if you have run out of ports on it) I\u0026rsquo;ll be doing the second option as it give better separation from my other devices and allows me to set private IP addresses for my nodes regardless the IPs already in use on my home network.\nRegardless the path your choose, you will need to connect your switch or router\u0026rsquo;s WAN port to your home network so your cluster can access the internet and you can access your nodes. (You could also have your cluster completely air-gapped and use static IPs on the nodes, but not being able to download applications and tools is in my opinion not worth the effort).\nSoftware For this cluster I will be using Ubuntu. Canonical ( the company behind Ubuntu) has done a great job of ensuring Ubuntu is stable on Raspberry Pis (with the help of software from the Raspberry Pi Foundation) and has a 64 bit version available (unlike Raspberry Pi OS as of the time of writing). I will be using 20.04, but the latest LTS version should be fine.\nThere is already a great tutorial on how to install Ubuntu on a Raspberry Pi. 
Make sure to select the latest LTS version with 64 bit support. Also, we have no need to install a desktop, so you can skip that step.\nConnecting to the nodes If you followed the above tutorial, you should have the IP address of all your nodes. If you can\u0026rsquo;t tell which IP goes to which node, try unplugging the network cables from all but one node, follow the instructions, and repeat for all the other nodes. If you are using a router for your cluster, make sure you are connected to its network (its WiFi or LAN port) and not your home network as the router will block connections from your home network into your cluster network. (if you want, you can create a port forward on your cluster router for port 22 to your so you can SSH into)\nOnce you know what node is what IP address, connect to the first node (which we will use as our head node). Try running ping 1.1.1.1 to ensure your node can connect to the internet. Then follow the cluster SSH guide to setup SSH between all your nodes.\nStatic IP addresses No matter if you have a dedicated cluster network or it is connected to your home network, you should configure static IP addresses for all your nodes so their addresses will not change accidentally in the future.\nPackages In future posts we will install needed packages for configuring our cluster operation, but below are some useful packages that can help with troubleshooting and analyzing cluster performance.\nDon\u0026rsquo;t forget to sudo apt update to make sure you have the latest package database.\nhtop iftop iotop dstat pv ","description":"The basics of getting a cluster of Raspberry Pis powered on and running. Full cluster configuration in later posts.","id":14,"section":"posts","tags":["cluster","networks","hardware"],"title":"Basic Cluster Setup","uri":"https://johnhollowell.com/blog/posts/basic-cluster-setup/"},{"content":"Clemson\u0026rsquo;s School of Computing (SoC) is the place at Clemson where Computer Science (CPSC), Computer Information Systems (CIS), and Digital Production Arts (DPA) are located. Other computing departments (like Computer Engineering) also use some of the SoC\u0026rsquo;s systems. Below are some useful tips and tools for quickly getting going in the SoC.\nAccess Servers The access servers are the way you can access all the SoC computers from off-campus (without having to use the VPN). You can SSH into them and then SSH into other computers through access (or anything else you can do through SSH). You can connect to the access servers using ssh \u0026lt;clemson_username\u0026gt;@access.computing.clemson.edu (or just ssh access.computing.clemson.edu if you computer\u0026rsquo;s username matches your Clemson username). When you connect, you will see a list of lab computers that you can then connect to by using their name (e.g. ssh babbage1). You can also use access2.computing.clemson.edu if the main access server is down or overloaded.\nIf you are on campus, you can directly access the lab computers without the need to go through the access server. Simply use ssh \u0026lt;computer_name\u0026gt;.computing.clemson.edu while on campus (or VPN) and you can directly connect to the machine.\nNOTE: There is a limit in place on the number of connections for each user connecting to the access server. I\u0026rsquo;ve found it to be 4 connections. If you need more connections, consider using both access and access2 or using SSH Multiplexing. Files on the lab computers All the lab computers share your home directory. 
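Both the hop through the access servers and the SSH multiplexing workaround for the connection limit mentioned above can be captured in ~/.ssh/config. A sketch, where the username jdoe and the babbage1 alias are placeholders:

    cat >> ~/.ssh/config <<'EOF'
    Host access
        HostName access.computing.clemson.edu
        User jdoe
        # multiplex sessions over one TCP connection to stay under the per-user connection limit
        ControlMaster auto
        ControlPath ~/.ssh/cm-%r@%h:%p
        ControlPersist 10m

    Host babbage1
        HostName babbage1.computing.clemson.edu
        User jdoe
        # hop through the access server when off campus
        ProxyJump access
    EOF

With that in place, ssh babbage1 from off campus should land on the lab machine through access without retyping the full host names.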
This means that if you write a file on one computer, you can access it on any other lab computer. This also means your settings for most programs will be the same on all the computers.\nThis also means you can access these files from your own computer as a network drive. Check out these instructions for more information on the subject (use the linux share instructions).\nSSH between computers SSHing between the lab machines can be a bit of a pain when you have to enter your password every time. It also makes it harder to write scripts that use multiple lab computers to work on rendering a project or running some processing. However, if you set up SSH keys on the computers, it allows the lab machines to connect to each other without the need for a password. And since the lab computers share files, once SSH keys are set up on one system, they will work on all the systems.\nThe process of making the keys we will use is fairly straightforward. You can check out more information on what these commands do if you are interested.\n1 2 ssh-keygen -t ed25519 -a 100 -f ~/.ssh/id_ed25519 -C \u0026#34;School of Computing\u0026#34; ssh-copy-id -i ~/.ssh/id_ed25519 localhost This will generate a key for the computers to use, and \u0026ldquo;install\u0026rdquo; it so they will accept connections from that key. Since all the computers have the needed files due to the shared filesystem, all the computers now trust connections from all the other computers.\nSnapshot folder Oh no! You just deleted all the files for your assignment! Not to worry.\nYour home directory (/home/\u0026lt;username\u0026gt;/) on the SoC computers is backed up for just such a problem. Within every folder in your home directory is a hidden folder named .snapshot. It will not appear in any listing of directories, but if you cd into it, you can access all the different backups that are available. You can ls ~/.snapshot/ to see all the different dates that have backups (there are hourly, daily, and weekly backups). These backup files are read-only, so you will need to copy them back into your home directory to be able to edit them.\nTo access and recover your files, you can either do\n1 2 3 cd ~ cd .snapshot/daily.1234-56-78_0010/path/to/your/files/ cp very-important-file.txt ~/path/to/your/files/ OR\n1 2 3 cd ~/path/to/your/files/ cd .snapshot/daily.1234-56-78_0010 cp very-important-file.txt ~/path/to/your/files/ Teachers\u0026rsquo; Office Hours While it isn\u0026rsquo;t really a technology in the SoC, your teachers are one of the best resources to gain knowledge and software development skills. After all, they aren\u0026rsquo;t called teachers for nothing.\nAll teachers are required to have office hours (and so are Teaching Assistants (TAs)). Make use of this time to get to know your teacher, ask questions, and learn more about topics that excite you. It is also a good idea to start projects early (I\u0026rsquo;m not saying I ever did this, but it is what I should have done) so you can ask the teacher questions in office hours before everyone else starts to cram the assignment and office hours get busy.\nYOUR SUGGESTION HERE Is there something you really liked or have often used that you think I should add here or in another post? Get in contact with me and let me know!\n","description":"Clemson’s School of Computing can be complicated. 
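One thing the recovery examples above do not show is figuring out which snapshot still contains a deleted file; a small sketch, where the path and file name are placeholders:

    for snap in ~/.snapshot/*/; do
        # print the snapshots that still contain the missing file
        if [ -e "${snap}path/to/your/files/very-important-file.txt" ]; then
            echo "found in ${snap}"
        fi
    done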
Here are some tips and tricks to get started quickly and make the most of the resources you have.","id":15,"section":"posts","tags":["clemson"],"title":"Clemson SoC 101","uri":"https://johnhollowell.com/blog/posts/clemson-soc-101/"},{"content":" Clemson University’s computer labs store files across all the computers using network shares. You usually just access these shares on the lab machines, but you can also add the shares on your own computer as a network drive.\nThere are two main shares on campus: the campus share used by all the Windows (and Mac?) lab machines (e.g. in Cooper Library, Martin, etc.) and the School of Computing’s Linux systems. Both systems can be accessed in a similar way, but with different settings.\nTo access these network shares, you must either be on campus internet (WiFi or Ethernet) or have the Clemson VPN installed and activated on your device. See the CCIT guide for VPN access for more information. The following instructions assume you are using a Windows device to access the shares. Using the credentials as below, you can follow a guide for adding network drives on Mac OS X or Linux (Ubuntu)\nSteps Open File Explorer and go to \u0026ldquo;This PC\u0026rdquo;. Click \u0026ldquo;Map Network Drive\u0026rdquo; in the top ribbon. Choose what drive letter you want the share to appear as (it doesn’t matter what you choose for this; I used \u0026ldquo;Z\u0026rdquo; for this example) Linux Share Windows Share Enter \\\\neon.cs.clemson.edu\\home into the \u0026ldquo;folder\u0026rdquo; box. 5. Check both \"Reconnect as sign-in\" and \"Connect using different credentials\" so the network drive will automatically connect and you can use your Clemson credentials (rather than your local device’s username and password). Click \"Finish\". 6. Enter your University username (with @clemson.edu) and password. (You might have to click \"more choices\" in the login window to be able to enter a new username/password.) \u0026lt;img data-src=\u0026quot;https://johnhollowell.com/blog/posts/accessing-your-clemson-network-shares/neon_creds.png\u0026quot; alt=\u0026quot;example login credentials for neon.cs.clemson.edu\u0026quot; data-caption=\u0026quot;\u0026quot; src=\u0026quot;data:image/svg+xml,%0A%3Csvg xmlns='http://www.w3.org/2000/svg' width='50%25' height='' viewBox='0 0 24 24'%3E%3Cpath fill='none' d='M0 0h24v24H0V0z'/%3E%3Cpath fill='%23aaa' d='M19 3H5c-1.1 0-2 .9-2 2v14c0 1.1.9 2 2 2h14c1.1 0 2-.9 2-2V5c0-1.1-.9-2-2-2zm-1 16H6c-.55 0-1-.45-1-1V6c0-.55.45-1 1-1h12c.55 0 1 .45 1 1v12c0 .55-.45 1-1 1zm-4.44-6.19l-2.35 3.02-1.56-1.88c-.2-.25-.58-.24-.78.01l-1.74 2.23c-.26.33-.02.81.39.81h8.98c.41 0 .65-.47.4-.8l-2.55-3.39c-.19-.26-.59-.26-.79 0z'/%3E%3C/svg%3E\u0026quot; class=\u0026quot;lazyload\u0026quot; style=\u0026quot;width:50%;height:;\u0026quot;/\u0026gt; 7. Click \"OK\". Your School of Computing home directory should now appear under the drive letter you chose. NOTE: When adding new files via the network share, they are created with permissions defined by your umask. You can use chmod xxx \u0026lt;file\u0026gt; to change a files permissions to xxx (view a chmod guide for more information on the chmod command) Enter \\\\home.clemson.edu\\\u0026lt;username\u0026gt; where \u0026lt;username\u0026gt; is your university username. 5. Check both \"Reconnect as sign-in\" and \"Connect using different credentials\" so the network drive will automatically connect and you can use your Clemson credentials (rather than your local device’s username and password). Click \"Finish\". 6. 
Enter your University username (without @clemson.edu) and password. (You might have to click \"more choices\" in the login window to be able to enter a new username/password.) \u0026lt;img data-src=\u0026quot;https://johnhollowell.com/blog/posts/accessing-your-clemson-network-shares/home_creds.png\u0026quot; alt=\u0026quot;example login credentials for home.clemson.edu\u0026quot; data-caption=\u0026quot;\u0026quot; src=\u0026quot;data:image/svg+xml,%0A%3Csvg xmlns='http://www.w3.org/2000/svg' width='50%25' height='' viewBox='0 0 24 24'%3E%3Cpath fill='none' d='M0 0h24v24H0V0z'/%3E%3Cpath fill='%23aaa' d='M19 3H5c-1.1 0-2 .9-2 2v14c0 1.1.9 2 2 2h14c1.1 0 2-.9 2-2V5c0-1.1-.9-2-2-2zm-1 16H6c-.55 0-1-.45-1-1V6c0-.55.45-1 1-1h12c.55 0 1 .45 1 1v12c0 .55-.45 1-1 1zm-4.44-6.19l-2.35 3.02-1.56-1.88c-.2-.25-.58-.24-.78.01l-1.74 2.23c-.26.33-.02.81.39.81h8.98c.41 0 .65-.47.4-.8l-2.55-3.39c-.19-.26-.59-.26-.79 0z'/%3E%3C/svg%3E\u0026quot; class=\u0026quot;lazyload\u0026quot; style=\u0026quot;width:50%;height:;\u0026quot;/\u0026gt; 7. Click \"OK\". Your Windows home directory should now appear under the drive letter you chose. You now have access to your files as if they were just another drive in your computer. Do note that these drives will be significantly slower than your actual computer drives due to higher latency and lower bandwidth.\n","description":"Clemson University’s computer labs store files across all the computers using network shares. You usually just access these shares on the lab machines, but you can also add the shares on your own computer as a network drive.","id":16,"section":"posts","tags":["clemson"],"title":"Accessing Your Clemson Network Shares","uri":"https://johnhollowell.com/blog/posts/accessing-your-clemson-network-shares/"},{"content":" Hey There! I\u0026rsquo;m John. I enjoy coding and problem solving. On the side I do some photography and videography work.\nCheck out my main website for more information about me and to get in contact.\n","description":"","id":17,"section":"","tags":null,"title":"About","uri":"https://johnhollowell.com/blog/about/"},{"content":" I\u0026rsquo;m at my extended family\u0026rsquo;s house way out in the middle of nowhere; barely enough cellular connection for an SMS, let alone trying to use any data.\nThey have DSL, but they are so far out that the signal is poor and it also is horrible speed. The fastest I saw while I was there was 700Kbps.\nWhile it is always a shock to go from over 100Mbps to under 1Mbps, I think that we are in an age where low bandwidth is not a show-stopper. Now obviously, downloading large files and games is a lot more tedious, I have found the \u0026ldquo;set everything to download overnight\u0026rdquo; method works quite well.\nI think there are three main reason why you can do more with less bandwidth than ever before.\nCompression and Codecs We have reached the point where processing power is so cheap, most of the time everything else is the limitation. We are glad to spend some power and time compressing data if it means we have more storage space on our devices or use less data. Website analysis tools will now complain if a webserver doesn\u0026rsquo;t compress its responses with at least gzip.\nWe are (slowly) starting to use new video and audio codecs that compress the crap out of the video/audio stream. Many devices are even starting to have highly performant hardware acceleration for these formats so it doesn\u0026rsquo;t even cause high load or power draw on mobile devices. 
Services like YouTube automatically convert content to many different qualities and have algorithms to pick the best quality that you can support.\nCaches, CDNS, and Apps Every web browser has a cache. Many even have several tiers of cache to give good hit/miss ratios and speed. If you are going to Facebook, you really should only ever need to receive the logo, most styles, and even some content once. This not only helps on slow connections, but even on fast connections an additional resource request can take a (relatively) long time to do an entire TCP and SSL handshake transaction.\nA further performance increase can be gained through websites\u0026rsquo; use of CDNs for their libraries and assets. If you are loading jQuery, FontAwesome, or bootstrap from local, you are doing it wrong. Pulling these assets from a CDN not only reduces the load on your server and the latency of the client accessing the resource, but allows caching these common resource between sites. If you visit a site using version x of the y library and then visit another site that uses the same version of y, you should be able to cache the first request of that resource and reuse it for any subsequent pages in any site. You can only do this if you using a CDN (and the same, but realistically most resources either have their own CDN or use one of the most common CDNs that everyone else uses).\nAdditionally, the use of site-specific apps (while annoying) allows the apps to only pull new content and \u0026ldquo;cache\u0026rdquo; all the resources needed to display the app. This makes it assured that outside of app updates, all most of the app\u0026rsquo;s traffic is the content you want to see (or ads sigh).\nMobile Focused Pages Thanks the the horrible practices of the Cellular Companies, anything that is loaded on a cellular connection needs to be small to not use much data to fit within limited bandwidth and even more limited data caps. While I have a great distaste for the stupidity of Cell carriers, their limitations have forced encouraged developments in efficient compression and transmission of pages (as well as a lot of bad practices in lazy loading and obfuscating in the name of minifying). Mosts sites will load smaller or more compressed assets when they detect they are on mobile platforms.\nCaveats While I did \u0026ldquo;survive\u0026rdquo; on the limited connection, I knew it was coming and was able to prepare a bit for it. I downloaded a couple of additional playlists on Spotify and synced a few episodes of TV to my phone from my Plex. However, I did not even use these additional downloads. I used the podcasts I had previously downloaded and even downloaded an additional episode while there. The ability in most apps to download content makes even a trickle of internet be enough to slowly build up the content you want.\nI have also recently reset my laptop and had to download FFmpeg while there. It took a few minutes, but it didn\u0026rsquo;t fail. I did want to do some complex computing while there, but since most of what I do is on other computers (servers, remote machines, etc) it was incredibly easy to do what I wanted to do through an SSH connection to a datacenter. This is cheating a little bit but really is not out of the ordinary; even on fast internet I would SSH out to do things I didn\u0026rsquo;t want or couldn\u0026rsquo;t do on my device (thanks Windows). 
This not not that different from devices like Chromebooks which almost entirely run remotely and require an internet connection to function (or function with all features).\nThis was also a family gathering, so I didn\u0026rsquo;t spend much time on the internet. I could quickly google the answer to win an argument and that was all I needed.\nConclusion Slow internet is still a pain, but I\u0026rsquo;ve grown to appreciate its limitations and work around them. Several trends in computing and content delivery in recent years have made slow internet more bearable. I won\u0026rsquo;t be giving up my high-speed internet any time soon, but slowing down and disconnecting a bit is a nice change of pace in this time where everything has to happen online.\n","description":"While it is always a shock to go from over 100Mbps to under 1Mbps, I think that we are in an age where low bandwidth is not a show-stopper.","id":18,"section":"posts","tags":["web","life"],"title":"A Trickle Is Better Than Nothing","uri":"https://johnhollowell.com/blog/posts/a-trickle-is-better-than-nothing/"},{"content":"2021. A new year; a new start.\nI\u0026rsquo;ve wanted to start a blog for a while, and I thought I might as well start it on the first of the year. I think I finally have enough things I want to talk about that a blog is worth the effort.\nWhat\u0026rsquo;s in a Name? So why the name \u0026ldquo;/dev/random\u0026rdquo;? Well, I\u0026rsquo;m a geek and this blog will be about anything. I don\u0026rsquo;t want to confine this blog to any one subject (including to just tech) and I want the entirety of the blog to be representative of that. It also give me the opportunity to have a punny subtitle, which I am always appreciative of.\nSo\u0026hellip; Why? This blog is mostly a place for me to put information for my future self and others. Don\u0026rsquo;t expect any deep, rambling prose. I\u0026rsquo;m not a spectacular writer and there are many things in my life that don\u0026rsquo;t merit blogging about. However, I have a very wide range of knowledge which I often will forget by the next time I need to use it. This gives me a way to record my experiences and experiments in a public place to which I can reference others. This blog is also an experiment, how meta is that?\nWhen can I get more of this great content? I would like to at least work on this blog every day. That doesn\u0026rsquo;t mean a new post every month; longer and more detailed posts will take me a bit longer. I might hold a post so a whole series can be release together. I might get bored and never create another post. Who knows?\n","description":"I've wanted to start a blog for a while, and I thought I might as well start it on the first of the year. I think I finally have enough things I want to talk about that a blog is worth the effort.","id":19,"section":"posts","tags":null,"title":"And So It Begins","uri":"https://johnhollowell.com/blog/posts/and-so-it-begins/"}] \ No newline at end of file diff --git a/archive/page/2/index.html b/archive/page/2/index.html index 685b58f..0ede53c 100644 --- a/archive/page/2/index.html +++ b/archive/page/2/index.html @@ -14,6 +14,8 @@ archive posts

Archive

2021
Series framework diff --git a/categories/101/index.xml b/categories/101/index.xml index e0871d4..2bbd67f 100644 --- a/categories/101/index.xml +++ b/categories/101/index.xml @@ -1,3 +1,3 @@ 101 on /dev/random: A Bit of Everythinghttps://johnhollowell.com/blog/categories/101/Recent content in 101 on /dev/random: A Bit of EverythingHugo -- gohugo.ioencontact@johnhollowell.com (John Hollowell)contact@johnhollowell.com (John Hollowell)©2023 John Hollowell, All Rights ReservedFri, 25 Jun 2021 20:06:55 +0000Getting Started With Devcontainershttps://johnhollowell.com/blog/posts/getting-started-with-devcontainers/Fri, 25 Jun 2021 20:06:55 +0000contact@johnhollowell.com (John Hollowell)Fri, 25 Jun 2021 20:06:55 +0000https://johnhollowell.com/blog/posts/getting-started-with-devcontainers/Setting up and maintaining a development environment is hard, especially when you need to destructively test features or libraries. Especially for contributing to a new project, you don’t know everything that is needed. Sometimes the install/development instructions assume some base tools or packages that are not included in your development environment of choice. In come devcontainers. Rather than having to search through the README for a project you are wanting to contribute to, installing several packages onto your machine, and troubleshooting when it doesn’t work, you can simply open the repository as a devcontainer and you are ready to start contributing.John Hollowellfeatured imagedevelopmentcontainers101guideClemson SoC 101https://johnhollowell.com/blog/posts/clemson-soc-101/Mon, 08 Feb 2021 20:08:42 -0500contact@johnhollowell.com (John Hollowell)Mon, 08 Feb 2021 20:08:42 -0500https://johnhollowell.com/blog/posts/clemson-soc-101/Clemson’s School of Computing (SoC) is the place at Clemson where Computer Science (CPSC), Computer Information Systems (CIS), and Digital Production Arts (DPA) are located. Other computing departments (like Computer Engineering) also use some of the SoC’s systems. Below are some useful tips and tools for quickly getting going in the SoC. -Access Servers The access servers are the way you can access all the SoC computers from off-campus (without having to use the VPN).John Hollowell101Getting started in CS at Clemson \ No newline at end of file +Access Servers The access servers are the way you can access all the SoC computers from off-campus (without having to use the VPN).John Hollowellclemson101Getting started in CS at Clemson \ No newline at end of file diff --git a/categories/clemson/index.xml b/categories/clemson/index.xml deleted file mode 100644 index 132f2e6..0000000 --- a/categories/clemson/index.xml +++ /dev/null @@ -1,2 +0,0 @@ -clemson on /dev/random: A Bit of Everythinghttps://johnhollowell.com/blog/categories/clemson/Recent content in clemson on /dev/random: A Bit of EverythingHugo -- gohugo.ioencontact@johnhollowell.com (John Hollowell)contact@johnhollowell.com (John Hollowell)©2023 John Hollowell, All Rights ReservedSun, 07 Feb 2021 14:08:51 -0500Accessing Your Clemson Network Shareshttps://johnhollowell.com/blog/posts/accessing-your-clemson-network-shares/Sun, 07 Feb 2021 14:08:51 -0500contact@johnhollowell.com (John Hollowell)Sun, 07 Feb 2021 14:08:51 -0500https://johnhollowell.com/blog/posts/accessing-your-clemson-network-shares/Clemson University’s computer labs store files across all the computers using network shares. You usually just access these shares on the lab machines, but you can also add the shares on your own computer as a network drive. 
-There are two main shares on campus: the campus share used by all the Windows (and Mac?) lab machines (e.g. in Cooper Library, Martin, etc.) and the School of Computing’s Linux systems.John HollowellclemsonGetting started in CS at Clemson \ No newline at end of file diff --git a/categories/conference/index.html b/categories/conference/index.html index 6d072be..445c593 100644 --- a/categories/conference/index.html +++ b/categories/conference/index.html @@ -21,6 +21,8 @@ 1 backup 1 +clemson +3 cloud 1 cluster @@ -29,18 +31,18 @@ 1 development 1 +gaming +1 hardware -3 +4 life -6 +4 Linux 1 memory 1 networks 3 -opinion -3 proxmox 1 SSH @@ -53,20 +55,20 @@ 1

Categories


\ No newline at end of file diff --git a/categories/index.json b/categories/index.json index f1f2c31..2fd5c5e 100644 --- a/categories/index.json +++ b/categories/index.json @@ -1 +1 @@ -[{"content":" The Tale Begins There I was, triaging a new issue that came in. A Linux VM running in the cloud was hanging when we started trying to run our workload on it. Huh, there was no output at all from the python script; it didn\u0026rsquo;t even create its log file, one of the first things it should do. Logging into the cloud instance, I looked around and noticed there was a python process running for the script we started, so the connection to the host and creating the python process at least worked. Well, since it didn\u0026rsquo;t work the first time, I killed the process and tried running the same command manually to see if there was an issue with the setup of the process. Aaaannnndddd it hung. But it doesn\u0026rsquo;t hang with the exact same NFS mount and AMI (root disk image) in a different cloud account we use.\nWell, this is interesting. Okay, let\u0026rsquo;s just look at the script we are running. Hung. Welp, I guess it is time for the good old turn-it-off-and-on-again fix. Now let\u0026rsquo;s look at the script. That seems fine. Let\u0026rsquo;s look at the python executable binary we are running. Hung. Uh, okay. Let\u0026rsquo;s check the script again. Hung. Well it looks like an NFS issue. Wireshark Time!\nAfter a bunch of test reads and write to the NFS mount with Wireshark slurping up packets, it looks like the client sends out read requests and the server never responds. The TCP connection retransmits the un-ACK\u0026rsquo;d packets until the TCP session times out, sends a RST, and sends the read request again.\nAfter inspecting the traffic in the AWS flow logs and in the cloud-to-on-prem firewall, it seems that all the traffic is correctly making it from the cloud client to the on-prem NFS server. So, what do we do now?\nAfter a bunch of additional tests, I ran a test of incrementally increasing the size of a file being written one byte at a time. The writes started to fail around 1300 bytes. Looking at the traffic in Wireshark, these write requests approached 1500 bytes. While both the server and client were using jumbo frames (9000 MTU), it is possible there is a 1500 MTU link somewhere between these two hosts.\nDiscovering the Path to a Fix Collaborating with our cloud operations team, we confirmed that the Direct Connect between the cloud and on-prem did have a 1500 MTU. However, this did not explain why the client/server could not use the standard Path MTU Discovery (PMTUD) to detect the smaller link and reduce the effective MTU to the lowest MTU along the path.\nPMTUD activates when a frame which is too large for a link is sent with the Don\u0026rsquo;t Fragment (DF) flag set. When network gear receives a frame too large for the MTU of the next hop, it will either fragment the packet or if the DF flag is set, return an ICMP error \u0026ldquo;Fragmentation Needed and Don\u0026rsquo;t Fragment was Set\u0026rdquo; packet to the sender and drop the packet. Testing in the other AWS account, this worked correctly and the TCP session downgraded to a 1500 MTU (technically the MSS was reduced to 1500 not the MTU, but that is a whole other topic). However for some reason in the original account, the session did not reduce to 1500. 
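A quick way to spot a smaller-than-expected path MTU like the one above is to send pings with the DF bit set, or to let tracepath walk the path. A sketch (the host name is a placeholder; 8972 and 1472 are 9000 and 1500 minus 28 bytes of IP and ICMP headers):

    # fails if any hop between here and the server has an MTU below 9000
    ping -M do -s 8972 -c 3 nfs-server.example.com
    # should succeed across a standard 1500-MTU path
    ping -M do -s 1472 -c 3 nfs-server.example.com
    # or let tracepath report the discovered path MTU hop by hop
    tracepath nfs-server.example.com

Note that this probing leans on the same ICMP errors PMTUD does, so across a hop that silently drops oversized packets the large ping simply times out instead of reporting the lower MTU.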
Comparing a packet capture from both accounts, I noticed that there was no ICMP error response in the broken account.\nAWSucks After much back-and-forth with our cloud ops team, we found that in the broken account there was an additional layer on top of the Direct Connect. The AWS Transit Gateway not only has a maximum MTU of 8500, but also does NOT return an ICMP \u0026ldquo;fragmentation but DF\u0026rdquo; error. So the client or server sends a packet larger than the MTU of the Transit Gateway, the TG drops the packet without informing the sender of why the packet is being dropped, and the sender continues to retransmit the packet for which it has not received an ACK thinking it was just randomly dropped.\nFinding Another Way So PMTUD won\u0026rsquo;t work; great. And we can\u0026rsquo;t reduce the client\u0026rsquo;s MTU to 1500 as there are workloads running on it which must have jumbo frames. Thus began a flurry of research resulting in me learning of Linux\u0026rsquo;s Packet-Later PMTUD. Using the net.ipv4.tcp_mtu_probing kernel tunable, we can enable an MTU (really MSS) size discovery for TCP sessions.\nHow It Works When the sender sends a packet which is too large for a link in the path of an active TCP connection, the too-large packet will be dropped by the network and the sender will not receive an ACK from the receiver for that packet. The sender will then retransmit the data on an exponential backoff until the maximum retransit count is reached. The sender will then send a RST and try a new TCP session (which if tried with the same size packet will just continue to repeat).\nThe tcp_mtu_probing functionality takes over once the standard TCP retransmit limit is reached. With tcp_mtu_probing enabled, the kernel\u0026rsquo;s network stack splits the offending packet into net.ipv4.tcp_base_mss sized packets and sends those packets instead of the too-large packet. For further packets, the network stack will attempt to double the current packet limit until it again fails to ACK the packet. It then uses this new largest packet size for all future packets for the TCP session. Linux 4.1 improves on this functionality by using a binary search instead of multiple doubling of the MSS. The initial reduced packet size starts at tcp_base_mss and then binary searches for the largest functioning MSS between the tcp_base_mss and the MTU of the interface passing the traffic.\nA great article digging deeper into this is Linux and the strange case of the TCP black holes\nConclusion While the ideal solution would have been for AWS to fix their broken, non-compliant network infrastructure, it is unlikely they will ever fix this. Using a solution which is built into the Linux kernel which allows the continued use of Jumbo frames for cloud-local traffic which preventing traffic over the Transit Gateway from breaking due to large packets.\n","description":"A simple issue at work with cloud hosts not being able to access an NFS mount on-prem turn into a multi-month bug hunt which ended with finding a low MTU network path and an AWS \"feature\" (pronounced bug)","id":0,"section":"posts","tags":["cloud","AWS","networks"],"title":"Unraveling the Mystery of NFS Hangs, or How The (Hybrid) Cloud is a Pain","uri":"https://johnhollowell.com/blog/posts/aws-tg-mtu/"},{"content":" I\u0026rsquo;ll start off by saying I love my Framework laptop. The transition from my old 15\u0026quot; laptop to this 13\u0026quot; Framework has been a lot more seamless than I thought it would be. 
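For reference, the tcp_mtu_probing workaround described above comes down to two sysctls. A sketch of enabling and persisting it (the base MSS value shown is illustrative):

    # 1 = probe only after a black hole is detected, 2 = always probe
    sudo sysctl -w net.ipv4.tcp_mtu_probing=1
    sudo sysctl -w net.ipv4.tcp_base_mss=1024
    # persist across reboots
    printf 'net.ipv4.tcp_mtu_probing = 1\nnet.ipv4.tcp_base_mss = 1024\n' | sudo tee /etc/sysctl.d/90-tcp-mtu-probing.conf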
It has worked perfectly for everything I\u0026rsquo;ve put it through.\nMy Experience With My Framework Battery Life Even with the recently-replaced battery in my old laptop, my Framework has a much longer battery life. Likely thanks to a combination of the battery and the processor, I\u0026rsquo;m able to get many hours out of even a demanding workload. I\u0026rsquo;m able to have Discord open in a video call for hours while having many other browser tabs or games running without the worry of where my charger is.\nLap-ability The one loss from moving from a 15\u0026quot; laptop to a 13\u0026quot; laptop is the lessened ability to use it effectively on my lap while connected to cords. The smaller size of the 13\u0026quot; means that it sits more between my legs rather than fully on top of my legs. This is normally fine, especially since the fan vents to the rear rather than to the right or left so my legs aren\u0026rsquo;t getting blasted with heat, but it does make having cables connected to the ports difficult and strains the cables\u0026rsquo; connectors.\nThankfully, I typically only need to have my charger connected to my laptop, so I found a solution. Since my charger is a type-c charger, I can just pop out one of my modules and directly connect the charger\u0026rsquo;s cable to the deeply-inset type-c port behind where the module would go. This means only the small cable is pressed against my leg and there is no strain on the cable.\nCharging Fan One thing that has disappointed me about my Framework is the leaf blower it turns into when plugged in to charge (when the battery is discharged). I think a combination of moving from the \u0026ldquo;Better Battery\u0026rdquo; Windows power profile while on battery to \u0026ldquo;Best Performance\u0026rdquo; when plugged in and the extra heat from the high-speed charging capabilities means the fan kicks up to be quite loud when plugging in. I have not played around much with power profiles to try to reduce this, but it typically only lasts for a short time and I almost always prefer the better performance over a bit of ignorable noise.\nPhysical Camera/Microphone Switches I didn\u0026rsquo;t think this would be a big thing, but it is really nice to be able to have confidence that at the hardware level, my mic and camera are not able to be accessed.\nE Cores As I have a wide, eclectic collection of software I run on a regular basis, I was pleased to not run into many issues with programs not properly understanding/scheduling with the efficiency cores on the 12th gen Intel processor. There are some tools (e.g. zstd) which don\u0026rsquo;t properly detect the cores to use. However, this could be due to running some of these quirky tools in WSL and how some tools try to detect hyper-threading to schedule themselves only on physical cores.\nFOMO? Now that 13th gen Intel and AMD mainboards have come out for the 13\u0026quot; Framework, do I feel like I am missing out or should have waited? Not at all. If I had needed a laptop after the 13th gen came out, I would definitely have chosen the 13th gen mainboard, but I am happy with what I have. Especially since I rarely have a use case for a high-performance laptop, I\u0026rsquo;m very comfortable with my 12th gen.\nPart of the appeal of the Framework is that I don\u0026rsquo;t have to have as much of a fear of missing out. The new laptops all have the same hardware outside of the mainboard. 
If I want a 13th gen laptop, I can easily upgrade my existing laptop to the 13th gen and get a 12th gen computer to use as a server, media PC, etc. And if I keep my laptop for long enough that the hardware is wearing out, I can replace the parts that are broken (or of which I want an improved version) and keep all the remaining parts, reducing the cost of repair and keeping still-good parts from ending up e-waste.\nAs for regrets getting the Framework rather than some other newer system, I have none. I have not stayed as up-to-date with the laptop scene since I\u0026rsquo;m not currently in need of a new one, but the systems that I have seen have not presented any better features or performance for my use cases. Some of the new Apple laptops have been interesting to follow, but I\u0026rsquo;m not a big fan of many aspects of Apple\u0026rsquo;s hardware and ecosystem and I still do come across some software that is not compiled for ARM (a big one being Windows). I love ARM and use it quite a bit in my homelab (mostly Raspberry Pis), but for my main system is just not quite universal enough for a daily driver.\nConclusion Overall, I\u0026rsquo;m very happy with my Framework and would absolutely recommend it to others. Yes, it is more expensive than another laptop with comparable specs, but the Framework\u0026rsquo;s build quality is supreme. If your use of laptops is more disposable, the Framework may not be for you (and that is okay), but I value the goals of the Framework and truly expect to get my money\u0026rsquo;s worth out of the repairability and modularity of the Framework.\n","description":"After living with the 13\" Framework laptop and releases of new specs for the 13\" and plans for the 16\", I've got some thoughts on my Framework","id":1,"section":"posts","tags":["hardware","life"],"title":"Framework Followup","uri":"https://johnhollowell.com/blog/posts/framework-followup/"},{"content":" I recently upgraded my laptop to a Framework laptop since my old trusty laptop\u0026rsquo;s screen cracked and a replacement screen cost as much as some new laptops. These are my initial impressions of the laptop\u0026rsquo;s build, performance, and usability.\nUse Case I have a bit of a minimal use case for my laptop. Since I have a powerful desktop and a fairly performant phone, I don\u0026rsquo;t need my laptop to be a do-everything device. If I need to do something that requires a lot of performance (gaming, heavy development builds, video transcode, etc), I will use my desktop. If I need to quickly do something, I will use the phone that is always in my pocket or on the desk next to me. My laptop fulfils three main functions: portable large-screen remote access to desktop, couch web-browsing and light development, and media consumption while on the road.\nDesktop Remote The main place I will install games and software, store some files, and do high-performance tasks is on my desktop. I often will need or want to do something on my desktop while not sitting at my desk. Be it from a few meters away on the couch or thousands of kilometers away, I will often remote into my desktop from my laptop. There are not really any specific requirements, but a large screen, enough CPU performance to decode the remote screen stream, and good enough networking to get the connection through. 
This is honestly the lowest performance need for a laptop, but having hardware decode for whatever remote solution would provide long battery life for this use case.\nCouch Computer This is the middle-of-the-road use case in terms of requirements. It is mostly web browsing, some light video consumption, and low-demand development/writing (like writing this blog). I use VS Code devcontainers for just about everything, so being able to run docker and VS Code well is a must. Mostly, this presents as having enough memory for the containers, VS Code (thanks memory-hungry electron), and all the extensions I typically use. Occasionally, having some performance is nice to be able to build a new dev container (fast network to pull dependencies, fast CPU to decompress image layers and compile dependencies, and mostly fast disk to support fast installation of packages, create new layers, etc.) and makes getting started contributing to a new project incredibly streamlined.\nOn-the-road System This is the most taxing use case that I have for my laptop. This is everything from Couch Computer and more. Some video transcoding (compressing) of footage I\u0026rsquo;ve taken, some light (and not-so-light) gaming, and occasionally some heavy network traffic (using my laptop as a portable NAS or sneaker-net).\nThis is also the use case where the connectivity of the laptop is the most important. From hooking into projectors using HDMI, to needing ethernet for some network troubleshooting, to flashing a Raspberry Pi or reading images from an SD card, the most variability in how I interact with my computers is on the road. The ample expansion/connectivity modules make it easier to have the right connector where I want it, when I want it. Also, the ability to move my ports around mean I will never have to do the awkward my-HDMI-is-on-the-wrong-side-for-this-podium dance again. Further, having 4 thunderbolt USB-C ports means that even if there is not an official module for what you want, you can easily connect a dongle or even make your own modules. Always in the data center? make yourself an RS-232 serial port module for interacting with all the serial consoles on your hardware.\nDesktop Replacement As a bonus use case, I will very, very rarely use my laptop at my desk instead of my desktop. My work laptop usually sits on my desk, plugged into a thunderbolt dock connected to all my peripherals and monitors. Every once in a while, I might use this setup with my personal laptop in this setup if I was working on some project on my laptop that would be too cumbersome to move to my desktop but might benefit from the extra monitors and peripherals.\nBuild Form Factor The Framework is a 13.5\u0026quot; laptop with a 3:2 screen ratio. While I\u0026rsquo;m used to my previous laptop\u0026rsquo;s 15\u0026quot; form factor, the added height of the Framework\u0026rsquo;s screen and higher resolution maintains a good amount of screen real estate. It also provides a more compact body which is more portable and takes up less space on a desk. Weighing in at 4.4 lb, it isn\u0026rsquo;t a light laptop, but the incredibly sturdy chassis and zero deck flex on the keyboard are reason enough for the bit of weigh.\nPower and Battery It uses Type-C (USB-PD) for charging via any of the 4 expansion ports when a USB-C expansion module is installed (or really you can directly connect to the type-c ports at the back of the expansion ports). This allows charging from either side of the laptop which brings a great versatility. 
While writing this, the idle power draw was ~15W at a medium-low screen brightness. Running a benchmark, the draw from the USB-C charger reached ~62W (on a 90W charger).Charging from 0% to ~80% while powered off averaged around 40W. Charging from ~85% to 100% averaged around a 30W draw (~10W to the battery and ~15W to the idle running system).\nKeyboard The keyboard is easy to type on with ample key spacing and a sensible key layout. I wrote this whole post on the Framework\u0026rsquo;s keyboard. The keys have good stabilization and have a comfortable travel distance. The palm rest areas beside the trackpad are large enough to use and the keyboard is centered on the chassis so one hand/wrist is more extended than the other.Overall, an easy keyboard on which to type.\nTrackpad Not much to say about the trackpad, and that is a good thing. The trackpad is a nice size: not too small to be useless and not too large to be cumbersome to use. It has a nice tactile click when pressed (which I rarely notice since I mostly tap-to-click rather than use the actual displacement button method of clicking) and a smooth surface which is easy to swipe across. The trackpad\u0026rsquo;s palm rejection while typing is very good, but the button still functions while the movement is disabled. If you place a lot of weight on the insides of your hands while typing, you may need to be careful to not push too hard on the trackpad while typing. The typical multi-touch gestures work correctly and smoothly zoom, swipe, and the rest.\nSpeakers The speakers on the Framework have impressed me so far. I will use earphones/headphones over speakers most of the time, but the speakers are much better than my previous laptop\u0026rsquo;s speakers and are a nice, usable option. They are quite loud and even at 100% there is no distortion, clipping, or chassis rattle. Although the speakers are down-firing at the front (user-facing side), they are on the angled bevel of the side so even sitting atop a flat surface the speakers fire out and around the chassis to provide a well-balanced sound profile.\nPerformance CPU My Framework performs well. I got the i5 12th gen variant (i5-1240P, up to 4.4 GHz, 4+8 cores) as a low power yet still performant portable system. Following on the Desktop Remote section above, I very rarely need my laptop to be very performant. What I want most of the time is something that can boost to do a little bit of compute while mostly being a power-efficient system that can run web apps, remote desktop software, and YouTube. The system excels at these tasks. I\u0026rsquo;ll leave the hard numbers and comparisons to benchmark publications, but the system has done everything (within reason) I\u0026rsquo;ve thrown at it.\nMemory While it may seem basic, the ability to have socketed memory can\u0026rsquo;t be ignored in modern laptops. Being able to upgrade and/or expand your system\u0026rsquo;s memory down the line is one of the simplest ways to give an old machine a boost. However, a lot of new machines are coming out with soldered memory that can\u0026rsquo;t be upgraded, expanded, or replaced. The availability of 2 SODIMM slots for memory is a great feature for repairability and the longevity of the system.\nCooling and Fan One disappointing aspect of the Framework is its cooling system and fan. When idle, the fan is inaudible and the user-facing components stay cool. However, even when idle the bottom chassis panel gets slightly too warm to hold for a long time. 
While on a desk, this is not an issue but when on a lap (where the lap in laptop comes from), the heat it a bit too much for bare skin contact and going hand-held with one hand on the bottom for support is not comfortable to hold. However, even when running full-tilt under a stress test, the top (keyboard, trackpad, and palm rest areas) stayed cool and comfortable.\nThe cooling fan, when going at full speed, is loud but does an adequate job of keeping the internals cool and preventing drastic thermal throttling. A concern I had heard from others was with the vent being in the hinge and concerns over the cooling capacity of the system while the screen is closed. After some tests, the hinge cover is shaped to direct the exhaust air out the bottom of the hinge which gives enough airflow to keep the system cool.\nWiFi 6E While I currently don\u0026rsquo;t have any other wifi gear which supports 6E to test against, I believe 6 GHz is going to be super useful in the coming years and having a computer that already supports it is a great feature. And even if it didn\u0026rsquo;t have a 6E chip in it, the Framework\u0026rsquo;s wifi is socketed which allows for future improvement.\nFor what I can test, the Framework\u0026rsquo;s WiFi works well. It gets the maximum speed my Access Point (AP) supports and has very good range. I haven\u0026rsquo;t noticed any difference it reception between different orientations of the laptop, so the antenna placement seems to be the best it can be.\nUsability I/O The ability to select the I/O that your laptop has is one of the obvious usability features of the Framework. The ability to have up to 4 USB-C thunderbolt ports is impressive and the various modules to adapt those ports into other common ports is fantastic. My favorite ability so far is just having a USB-C port on both sides of the laptop. When I was searching for a new laptop, few had a Type-C port and even fewer had at least one on both sides. The Framework works well with all the USB-C and thunderbolt docks and dongles that I have used with it.\nBattery Another great usability feature is the long battery life. The combination of an efficient processor and a high-capacity battery makes the Framework able to stay running for hours.\nSecurity, Privacy, and Webcam For security and privacy, the Framework has several great features. For signing in (on supported OSes), you can use the fingerprint sensor integrated into the power button for authentication. While my previous laptop had a Windows Hello capable camera, the fingerprint reader is just about as easy to use. The fingerprint reader works well\nOn the webcam, the Framework has physical toggles to disable the webcam and disable the microphone (independently). They toggles have a nice red section visible when disabled and the camera has a light when it is active. It is really nice to have physical switches for the cameras, and since I am using the fingerprint sensor for login (instead of the facial recognition of my previous laptop), I can leave the camera disabled most of the time. The camera is 1080p and does a good enough job with challenging situations like low light and high contrast environments.\nScreen The screen is a 2256 x 1504 (3:2) glossy screen. The extra screen real estate is nice for tasks that can make use of the extra vertical space, media consumption which is mostly 16:9 or wider leaves unused space on the screen. The maximum brightness of the screen is quite bright and is easily visible in direct sunlight. 
The screen also has a light detector which can be used for automatic screen brightness adjustments. However, at least in Windows, the auto brightness works well but causes a massive jump in brightness when adjusting to above ~50%. Due the the glossy, highly-reflective screen, bright sun from behind makes it hard to read the screen even at maximum brightness. I\u0026rsquo;m planning to investigate what matte screen films/protectors are available that I could use to make the screen less reflective. As I will very rarely use my laptop for very color accurate uses, a matte screen would be better.\nWindows Install and Drivers One cautionary note revolves around the newer, less used components in the Framework. I installed Windows 10 and out of the box, the trackpad and WiFi did not work. I had to use an Ethernet dongle (since I did not get the ethernet Framework module) to download the driver pack from Framework\u0026rsquo;s website. It did not automatically get the drivers from Windows Update like most other firmware/drivers. I also tried Ubuntu 22.04, and while it had fully functional WiFi and and trackpad out of the box, it did not properly adjust the screen backlight based on the function keys (but was able to control the brightness manually using the OS settings slider).\nOverall Impressions Overall, I really like my Framework laptop so far. I did not think I would like the smaller size, but setting the display scaling to lower than the default of 200% (I\u0026rsquo;m testing between 175% and 150%) give more than enough screen space for task I need to do on my laptop. After writing this whole post on the keyboard both on a couch and a desk, it is comfortable to type on and quick to pick up touch typing. It is small and portable while having good performance, battery longevity, and screen real estate. I wish it was a bit bigger as I like a laptop with a larger screen, but for the chassis size the screen is nearly 100% of the size of the laptop footprint. With a 11-in-1 USB dongle, it has as much or more connectivity than my desktop. It works flawlessly with thunderbolt docks (at least the ones I have tested). The first install of Windows 10 was a little painful having to install the driver bundle, but that is a small, one-time price to pay for a nice machine on an old OS.\n9.5/10. Would recommend.\n","description":"I recently upgraded my laptop to a Framework laptop since my old trusty laptop's screen cracked and a replacement screen cost as much as new some laptops. These are my initial impressions of the laptop's build, performance, and usability.","id":2,"section":"posts","tags":["hardware","life"],"title":"Framework First Impressions","uri":"https://johnhollowell.com/blog/posts/framework-first-impressions/"},{"content":" Trying to boot off an NVMe drive on older hardware can cause some issues. If you are running an older BIOS/UEFI, it may not have the needed drivers to understand how to talk to a NVMe drive. I ran into this exact issue when trying to boot my Dell R510 from an NVMe drive.\nTo boot from NVMe, I would need to use some shim which could be booted by the BIOS which would chain-boot the actual OS on the NVMe.\nAttempt 1 - Clover The first method I attempted to used was the Clover Bootloader. Clover, while primarily used for Hackintoshes, can have NVMe support added and chain boot to another disk. 
I wanted to try this first as I would prefer an OS-indifferent solution that would continue to work no matter what I installed on the NVMe.\nI attempted to image Clover onto a USB drive and after several wrong attempts, I finally formatted the USB as fat32 and just copy/pasted the contents to the drive. I then followed instructions I found to enable NVMe compatibility by copying NvmExpressDxe.efi from EFI/CLOVER/drivers/off into EFI/CLOVER/drivers/BIOS/ and EFI/CLOVER/drivers/UEFI/. I then modified the EFI/CLOVER/config.plist file to automatically boot the the NVMe drive after a 5 second pause.\nHowever, I could never get Clover to read this config.plist file. I tried placing it in other paths that were suggested by comments on the internet. I tried reverting to the original file and modifying one small value to ensure I had not messed up the file formatting. Still, I could not get Clover to read the config file and automatically boot from the NVMe drive. It would just remain at the boot selection menu where I could manually select the NVMe to boot from which would then work perfectly.\nAttempt 2 - Proxmox Boot Proxmox comes with the proxmox-boot-tool tool which is used to synchronize all the boot disks with the UEFI (ESP) partition. After giving up on Clover, I looked into proxmox-boot-tool and found I could just place an extra ESP partition on the USB drive and let proxmox-boot-tool keep it up-to-date and synced.\nRather than creating the correct partitions in the correct locations and of the right size, I just did a dd if=/dev/\u0026lt;root pool\u0026gt; of=/dev/\u0026lt;usb drive\u0026gt; bs=1M count=1024 to copy over the first 1 GB of the disk. I then used gparted to delete the main partition (leaving the BIO and ESP partitions) and to give the remaining partitions new UUIDs. I then booted into Proxmox and proxmox-boot-tool format /dev/disk/by-uuid/\u0026lt;USB ESP partition UUID\u0026gt; --force and proxmox-boot-tool init /dev/disk/by-uuid/\u0026lt;USB ESP partition UUID\u0026gt;. Once that finished, I rebooted and the USB drive was used as the boot drive which booted into the main Proxmox OS.\nConclusion I\u0026rsquo;ve had this in place for a few months now and it has worked perfectly through several updates to the boot cmdline options and kernel updates.\n","description":"My process of finding the best way to boot Proxmox off an NVMe drive in an old Dell R510","id":3,"section":"posts","tags":["sysadmin","proxmox"],"title":"NVMe Boot in Proxmox on Older BIOS","uri":"https://johnhollowell.com/blog/posts/nvme-proxmox-bios/"},{"content":" This was my first year going to the All Things Open and my first in-person conference in several years.\nOverall, I really enjoyed the conference and would recommend other\u0026rsquo;s attend. It definitely helped that I already live in Raleigh so I didn\u0026rsquo;t have to travel to the conference, but even traveling to the conference would be a good experience.\nVenue The Raleigh conference center is a spacious venue. The paths to the session rooms are wide and easy to access. Most of the session rooms were large enough to fit everyone in the session. The conference center has ample surrounding parking and food options if the catered sandwiches don\u0026rsquo;t cover your appetite. The sponsor/vendor booths were set up in the atrium with plenty of room to interact with the vendors and still have room to walk past. 
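Circling back to the Proxmox boot workaround above, the whole USB-ESP trick condenses to a handful of commands. A sketch using the post's placeholder device names (double-check the devices before running dd, and re-randomize the copied partition UUIDs in gparted as described):

    # clone the partition table plus the BIOS and ESP partitions onto the USB drive
    dd if=/dev/<root-pool-disk> of=/dev/<usb-drive> bs=1M count=1024
    # after removing the copied data partition and assigning new UUIDs:
    proxmox-boot-tool format /dev/disk/by-uuid/<usb-esp-uuid> --force
    proxmox-boot-tool init /dev/disk/by-uuid/<usb-esp-uuid>
    proxmox-boot-tool status   # confirm the new ESP is registered and kept in sync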
All the areas were clean and tidy and the HVAC worked well in all but the smallest session room when it was packed.\nVendor Booths There were a lot of vendors spread around the whole atrium area. The conference did an interesting optional gamification addition to the conference: the keynote sessions and each vendor booth had a code which when entered into the conference app would add points to your score. At the end of each day to top scorers were randomly draw for some very nice prizes.\nThere were a lot of really nice vendors present. From large companies like AWS, Microsoft, and Meta to small FOSS organizations like the FSF and OSI. Many vendors had great swag and welcoming representatives to talk to. While most of the companies were definitely focused on selling to enterprise customers, there were many that had personal/community versions of the software available and knowledgeable people to answer technical questions.\nSessions The session subjects covered a wide range of enterprise related to tracks focused on the open source community and collaboration. Some of the sessions were livestreamed for the virtual attendees (and thus recorded) while some were not recorded. I mostly attended the non-recorded sessions as I can watch the recorded sessions later, but all the sessions were well attended.\n","description":"My experience attending All Things Open for the first time","id":4,"section":"posts","tags":["life","ATO"],"title":"All Things Open 2022 Impressions","uri":"https://johnhollowell.com/blog/posts/ato22/"},{"content":" This December I graduated from Clemson University with my Masters degree in Computer Science. There were a lot of things I learned (obviously), but I think the most useful things I learned were not from the course material, but my outside learning and interactions with peers and professors.\nProject Organization All my courses this semester were project based; while some of the grade for the course came from quizzes or homeworks, over 50% came from a semester long project. My experience with these projects greatly differed based on the professor\u0026rsquo;s involvement and whether it was a group project.\nProblem Definition One of my main gripes for several of my project assignments was the complete lack of the professor defining what the project should look like. While there was some guidance on the general category of project that was required, there was little to no guidance of what specific topics were in scope. We submitted a project proposal, which would have helped with validating the acceptability of the project, however the professors rarely commented on the validity of the proposal, let alone return a grade for the proposal in a reasonable amount of time (read: before the end of the semester).\nThis is a perfect example of why requirements gathering and client interaction is such an important part of the development lifecycle. Knowing the plan for the project before spending development time ensures it is not wasted on something that is not the desired result. Having strict requirements allows the developer to precisely match the functionality to the desired outcomes.\nDeliverables Another important aspect which was mostly glossed over was deliverables. While each professor did say a deliverable of the project would be a final paper, specifics on the format, length, and content of the paper were lacking or never given. In addition, other deliverables were requested after the initial assignment was created, often at the very end of the semester. 
While this is not that uncommon in \u0026ldquo;real life,\u0026rdquo; often added requirements/deliverables will push back the project\u0026rsquo;s due date; not so with school projects, which must be done by the end of the semester.\nGroup Work Group work in school is almost always a complete mess. Over the course of my Masters degree, I\u0026rsquo;ve been in some okay groups and a lot of bad groups. I\u0026rsquo;ve been in groups where someone went completely AWOL for several months and only responded to messages when it was time for them to add their name to the deliverables. I\u0026rsquo;ve also been in some groups that were fantastic, where the team members understood that occasionally someone might have other stuff they needed to prioritize, but by the end of the semester everyone had contributed equally. The best groups recognized the different skills of each member and assigned tasks to the person that was most capable of completing them.\nGroup work in school is very different from working in teams in industry. In school your group grade is at best 10% based on your individual contribution. This leads some people to not contribute to the team and just accept a 90% as their max grade. In work, if you do not do the tasks assigned to you, no one is going to do your tasks and it is very apparent whose responsibility they are. Getting paid to do the work rather than paying to do the work also drastically changes the motivation and desire to complete the work.\nSelf Learning Most of the courses I took in my Masters program covered information I had learned previously either on my own or on the job. This meant that a large portion of the course material was redundant to me. However, these courses gave me the opportunity to deepen my knowledge of the covered material and utilize the professors as a resource to discover new corollary topics to learn on my own. This gave me the opportunity to learn at my own pace and follow the rabbit trails that I find interesting.\nI have also had courses that I had to teach myself; professors that don\u0026rsquo;t teach or teach wrong material. For one professor in particular, I had to stop going to class as listening to her lectures decreased/confused my pre-existing knowledge on the topic.\nLab Teaching Assistantship I had a lot of fun being a Teaching Assistant (TA) for an undergrad lab section this past semester. I got to befriend some really cool students and get a taste of what it takes to teach. As I would like to teach at some point in the future, this was a fantastic opportunity to understand some of the requirements of teaching, experience the \u0026ldquo;joy\u0026rdquo; of grading, and deal with students\u0026rsquo; questions and concerns.\n","description":"This December I graduated from Clemson University with my Masters degree in Computer Science. There were a lot of things I learned (obviously), but I think the most useful things I learned were not from the course material, but my outside learning and interactions with peers and professors.","id":5,"section":"posts","tags":["life","opinion"],"title":"Masters Degree Takeaways","uri":"https://johnhollowell.com/blog/posts/masters-degree-takeaways/"},{"content":" ZFS is a great filesystem that I use on most of my systems and it makes full-drive backups a breeze when I am refreshing hardware in my homelab. However, sometimes I want to back up to a non-ZFS system.
These are the steps I use for fast and verified backups to a file on another computer.\nTL;DR: Combine the power of ZFS, zStandard, pv, and netcat to have a fast backup of a ZFS snapshot with verbose metrics of the process and progress.\nBackground If you already know about ZFS, snapshots, replication, and zStandard, feel free to skip this section. ZFS is a next-generation filesystem which supports a lot of great usability, data integrity, and performance features.\nOne of the most useful features is snapshots. Since ZFS is a copy-on-write (COW) filesystem, it can make a \u0026ldquo;copy\u0026rdquo; of an entire filesystem instantly as it just stores the current state and keeps blocks of data even if they later get updated/deleted. This is incredibly useful for backing up a system, as you can make a snapshot of the system instantly while it is running and then take the time to transfer the data.\nZFS can take a snapshot and zfs send the data in a stream that can be piped to a file, other commands, or a zfs receive on another host to load the datasets to that host\u0026rsquo;s storage and make the files available on the live filesystem. Receiving to another system has many benefits, but one major problem is the destination requires a ZFS pool mounted that has enough unused storage to receive all the incoming data. Sometimes this is not feasible, or even if the destination has a working pool it is not desired to mix in another filesystem with the existing data. In this case, sending to a file will store the entire send stream that can later be cat\u0026rsquo;d back to a zfs receive whenever desired.\nOne other tool used in this guide is zStandard. This is a newer compression library with great compression ratios while maintaining fairly high compression speed and incredibly fast decompression speed. I love zStandard and try to use it in everything. It has also had a large adoption increase in the last year or so with many other projects including zStandard compression support (ZFS, btrfs, tor, and Rsync to name a few).\nSetup There are two hosts: one using ZFS which will be backed up (src.example.com), and one host which will store the backup (dest.example.com). This destination host only needs enough storage space to store the (compressed) send stream.\nAll code is run on src.example.com unless otherwise noted. Making a Snapshot ZFS send streams only work on snapshots, so we need to create a snapshot of the current files and data to be able to send it. If you already have an up-to-date snapshot (maybe from automation), you can just use that snapshot.\nTo create a snapshot, you either need to be root (run the following command with sudo), or have the snapshot ZFS permissions on the dataset. As we will be creating a recursive snapshot of all datasets, it is easier to just run commands as root.\nThe format of the snapshot command is\nzfs snap[shot] pool/datasetA/subdataset/thing1@snapshot-name.\nTo snapshot the \u0026ldquo;testing\u0026rdquo; dataset on my \u0026ldquo;tank\u0026rdquo; pool with the snapshot name \u0026ldquo;backup_2021-01-02_0304\u0026rdquo;, I would use either command\n1 2 zfs snap tank/testing@backup_2021-01-02_0304 zfs snapshot tank/testing@backup_2021-01-02_0304 To back up an entire pool, use zfs snap -r tank@full-backup which will recursively (-r) snapshot the given dataset and all datasets below it.\nDetermining the Size of the Send Now that we have our snapshot, it would be nice to know how much data we will be sending and storing for our backup.
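First, though, it is worth a quick check that the recursive snapshot actually landed on every child dataset. A minimal sanity check, assuming the tank pool and the full-backup snapshot name used above:\nzfs list -t snapshot -r tank | grep full-backup\nEvery dataset you expect to back up should show a @full-backup entry in that listing.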
We can either get a (fairly accurate) estimate of the size of the send (quick) or get the exact size of the send. Unless you really need to know the exact size of the send, I recommend the fast method.\nFast Size We can get an estimate of the size of a send by running the send with the dry-run flag (-n) in verbose mode (-v).\n1 zfs send -R -n -v tank@full-backup The last line should tell you the estimate of the size of the send.\nSlow Size If you really need the exact size of the send, you can use wc to get the total bytes being sent.\n1 zfs send -R tank@full-backup | wc -c If you want to see the speed that zfs can read the send data off your storage, you can use pv (you might need to install it) to see the size and speed.\n1 zfs send -R tank@full-backup | pv \u0026gt; /dev/null #fullsend Now that everything is prepared, we can actually send the data to the destination. We\u0026rsquo;ll start with the most basic form and add on some extra commands to add speed and metrics of the status of the send.\nIn the following examples, the zfs send command is used with the -R flag. This makes a \u0026ldquo;replication\u0026rdquo; send stream which can fully recreate the given snapshot from nothing. You can omit it if that is not the functionality you need.\n-R, \u0026ndash;replicate\nGenerate a replication stream package, which will replicate the specified file system, and all descendent file systems, up to the named snapshot. When received, all properties, snapshots, descendent file systems, and clones are preserved. 1\nBasic Send Getting bits from A to B is pretty easy. We can use SSH to send the data to the destination host and save it as a file2.\n1 zfs send -R tank@full-backup | ssh dest.example.com \u0026#34;cat \u0026gt; /path/to/saved/file.zfsnap\u0026#34; We can use the size we found earlier to get a rough progress bar. pv can take in the size of the stream and use it to determine an ETA and progress. It can take integer values with units of \u0026ldquo;k\u0026rdquo;, \u0026ldquo;m\u0026rdquo;, \u0026ldquo;g\u0026rdquo;, and \u0026ldquo;t\u0026rdquo;3.\nAssuming we have 24860300556 bytes (23.2GiB), we could use either of the following\n1 2 zfs send -R tank@full-backup | pv -s 24860300556 | ssh dest.example.com \u0026#34;cat \u0026gt; /path/to/saved/file.zfsnap\u0026#34; zfs send -R tank@full-backup | pv -s 24G | ssh dest.example.com \u0026#34;cat \u0026gt; /path/to/saved/file.zfsnap\u0026#34; If you have ZFS installed on the destination, you can validate the send stream using zstreamdump4.\n1 2 # on dest.example.com cat /path/to/saved/file.zfsnap | zstreamdump While this works and is super reliable, it is inefficient in its data storage size and transport cost. The send stream is uncompressed on your destination and SSH can use significant CPU on low-power devices.\nThe next two solutions seek to solve these problems.\nCompression As long as you are not sending a raw or encrypted snapshot, there will be some amount of compressible data in the send stream.
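If you want a rough idea of how compressible your data is before committing to the full send, you can compress just the leading chunk of the stream and compare sizes. A quick sketch (the first gigabyte is not necessarily representative of the whole pool, and zfs send may complain about a broken pipe when head exits early):\nzfs send -R tank@full-backup | head -c 1G | zstd -T0 -c | wc -c\nDividing the byte count it prints by 1073741824 gives an approximate compression ratio for that sample.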
We can compress the send stream so it is (a bit) smaller on the destination\u0026rsquo;s storage.\nYou can compress on either the source or the destination; however, compressing on the source means less data is transmitted over the network, which is usually slower than the CPU needed for compression.\nWe\u0026rsquo;ll use zStandard due to its speed, compression ratio, and adaptable compression level.\nBasic Usage\n1 zfs send -R tank@full-backup | zstd -c | ssh dest.example.com \u0026#34;cat \u0026gt; /path/to/saved/file.zfsnap\u0026#34; ZStandard can also use an adaptive compression level. This means that if the network is slow and the compressor would otherwise be idle, it can increase the compression level and can also reduce the level if the network speeds up. This does mean the resulting compression ratio can end up lower, but if reduced storage space is desired, the stream can be recompressed (e.g. zstd -d -c /path/to/saved/file.zfsnap.zst | zstd -T0 -19 -o /path/to/saved/file_smaller.zfsnap.zst). The minimum and maximum levels for the adaption can be set, but just using --adapt falls back to sane defaults (3 to 15).\nIt can also use multiple threads to fully utilize all the cores in the host. The number of threads can be specified or set to 0 to use the same number of threads as cores (-T0)5. It has a verbose mode (-v) as well which gives insight into the compression level and compression ratio of the stream.\n1 zfs send -R tank@full-backup | zstd -c -v -T0 --adapt=min=1,max=19 | ssh dest.example.com \u0026#34;cat \u0026gt; /path/to/saved/file.zfsnap.zst\u0026#34; pv can also be used to give progress and speed calculations (however, it seems that the verbose output of zstd conflicts with pv):\n1 zfs send -R tank@full-backup | pv -cN raw -s 24G | zstd -c -T0 --adapt=min=1,max=19 | pv -cN compressed | ssh dest.example.com \u0026#34;cat \u0026gt; /path/to/saved/file.zfsnap.zst\u0026#34; Local Send Only use the following across a network you trust (not the internet). This method sends data unencrypted. SSH takes a lot of processing power to encrypt data when sending large amounts of data through it. If we are on a secure network where we can sacrifice encryption for speed, we can use netcat instead of ssh.\nHowever, there is no server on the destination (unlike the SSH daemon), so we need to start a netcat server on the destination to listen (-l) for connections on a port (12345) and have it redirect to the destination file (with pv showing us stats on the receiving side).\n1 2 # on dest.example.com nc -l 12345 | pv \u0026gt; /path/to/saved/file.zfsnap Now we can send it the data to save to the file\n1 zfs send -R tank@full-backup | pv -s 24G | nc dest.example.com 12345 Putting it all together 1 2 # on dest.example.com nc -l 12345 | pv \u0026gt; /path/to/saved/file.zfsnap.zst 1 2 3 4 5 6 # on src.example.com snapName=\u0026#39;tank@full-backup\u0026#39; zfs snap -r ${snapName} sendSize=$(zfs send -v --dryrun -R ${snapName} | grep \u0026#34;total estimated\u0026#34; | sed -r \u0026#39;s@total estimated size is ([0-9\\.]+)(.).*@\\1\\n\\2@\u0026#39; | xargs printf \u0026#34;%.0f%s\u0026#34;) zfs send -R ${snapName} | pv -cN raw -s ${sendSize} | zstd -c -T0 --adapt=min=1,max=19 | pv -cN compressed | nc dest.example.com 12345 https://openzfs.github.io/openzfs-docs/man/8/zfs-send.8.html\u0026#160;\u0026#x21a9;\u0026#xfe0e;\nAs far as I know, the .zfsnap is not an official or commonly used extension. However, it helps me know what the file is, so I\u0026rsquo;ve used it here.
Use whatever file name and extension you want.\u0026#160;\u0026#x21a9;\u0026#xfe0e;\nhttps://linux.die.net/man/1/pv\u0026#160;\u0026#x21a9;\u0026#xfe0e;\nhttps://linux.die.net/man/8/zstreamdump\u0026#160;\u0026#x21a9;\u0026#xfe0e;\nThe documentation for zStandard notes that using the -T flag with --adapt can cause the level to get stuck low. If you have problems with the compression level getting stuck at a low value, try removing the threads flag.\u0026#160;\u0026#x21a9;\u0026#xfe0e;\n","description":"ZFS is a great filesystem which I use on most of my systems and it makes full-drive backups a breeze. However, sometimes I want to back up to a non-ZFS system. These are the steps I use for fast and verified backups to a file on another computer.","id":6,"section":"posts","tags":["ZFS","backup","sysadmin"],"title":"ZFS Backups to Files","uri":"https://johnhollowell.com/blog/posts/zfs-backups-to-files/"},{"content":"I first noticed Kernel Same-page Merging (KSM) while working with Virtual Machines (VMs) under KVM (in Proxmox VE).\nKSM is a way of reducing physical memory usage by using one physical page of memory for all duplicate copies of that page. It does this by periodically scanning through memory, finding duplicate pages, and de-duplicating them via virtual memory. It is an extension of how the kernel shares pages between fork()\u0026lsquo;ed processes and uses many of the same methods of sharing memory. KSM is most often used with virtualization to de-duplicate memory used by guest Operating Systems (OSs), but can be used for any page of memory which the program registers with KSM to scan. \u0026ldquo;Red Hat found that thanks to KSM, KVM can run as many as 52 Windows XP VMs with 1 GB of RAM each on a server with just 16 GB of RAM.\u0026rdquo;1\nVirtual Memory Background To fully understand how KSM works, a (at least) basic understanding of how virtual memory works is required.\nTo prevent programs from having to know where every other process on the computer is using memory, the kernel (the all-powerful dictator of the OS) tells each process it has memory starting at address 0. It then keeps a record of where in actual (physical) memory each block (page) of the virtual memory is located.\nIt uses this mapping to translate memory addresses each time the process reads or writes to memory.\n© Computer History Museum This virtual memory also allows things like memory-mapped files on disk and Copy-On-Write (COW) pages. When a process clones (forks) itself, it doesn\u0026rsquo;t have to make a copy of all the memory it was using. It simply marks each page as COW. Each process can read from their memory with both virtual addresses pointing to the same physical page (now marked COW), but when either attempts to write to memory, the existing physical page is left in place (so the other process can still use it) and a new physical page is allocated and mapped to the writer\u0026rsquo;s virtual memory.
This allows pages of memory that are not changed in forked processes to use no additional memory.\nThe same process is used by KSM: it finds duplicate pages in the memory ranges registered with it, marks one of the physical pages as COW, and frees the other physical pages after mapping all the virtual pages to the one physical page.\nhttps://kernelnewbies.org/Linux_2_6_32#Kernel_Samepage_Merging_.28memory_deduplication.29\u0026#160;\u0026#x21a9;\u0026#xfe0e;\n","description":"Today I Learned about Kernel Same-page Merging (KSM)","id":7,"section":"posts","tags":["Linux","memory"],"title":"TIL: Kernel Same-page Merging (KSM)","uri":"https://johnhollowell.com/blog/posts/til-ksm/"},{"content":" Please read A Trickle Is Better Than Nothing before reading this post. I just got over having no internet at my apartment for over a week. I was gone a portion of the week, but it was still very inconvenient. Working remotely doesn\u0026rsquo;t help as to get paid I need to have an internet connection (but not necessarily a fast connection).\nWorking Around It While I could have used cellular data to carry me through, I had already used a significant portion of my data cap on various travels this summer. I ended up just going onto campus and working from my laptop in a computer lab.\nWhile on campus (with its wonderful gigabit symmetrical internet), I downloaded some videos from my YouTube Watch Later playlist so I could have some videos to watch at home. I tried to do as much pre-downloading of content as I could so I would have it accessible at home.\nMissing the Trickle So I had everything downloaded and I was fine, right? Wrong.\nI do more with my life than just watching YouTube. I play games, I browse social media, and (most frustratingly in this situation) I code. It is impossible to stay up-to-date on PRs and Issues without being able to connect to the internet. While I could have looked at the GitHub website on my phone, I have a lot of nice tooling around Issues/PRs that is on my desktop.\nI also wanted to open some PRs on some FOSS projects I want to improve. I couldn\u0026rsquo;t do a git clone, I couldn\u0026rsquo;t download the devcontainers needed for the new project and language, I couldn\u0026rsquo;t easily research how to do what I wanted in the documentation or on StackOverflow. This stopped me dead in my tracks and forced me to either make a trip back to campus to get internet or use the limited cellular data I had left to clone the entire repo and pull all the required container layers.\nWhat If How could it have been if I had at least a small amount of internet? I would still utilize the high-speed connection at campus to download some content to watch, but I would have still been able to pull up the YT page for the video to see comments and the description and to comment and like it myself. While it would have taken a while, I could have left the repo and containers to download while I was watching something or making dinner or overnight. I could have refreshed my Issues/PRs and gotten any updates on their status and checks. I could have seen that a new video was released by my favorite channel and either queued the video to download or gone somewhere with internet to quickly download it.\nOverall, I am very grateful for the internet I have.
This just makes me appreciate the internet all the more with its redundancy and high availability and goes to prove that the last mile is really the most vulnerable segment of any network or connection.\n","description":"I just got over having no internet at my apartment for over a week, and I can confirm that a trickle is better than nothing.","id":8,"section":"posts","tags":["web","life","opinion"],"title":"Nothing Is Definitely Worse Than a Trickle","uri":"https://johnhollowell.com/blog/posts/nothing-is-definitely-worse-than-a-trickle/"},{"content":" Setting up and maintaining a development environment is hard, especially when you need to destructively test features or libraries. Especially for contributing to a new project, you don\u0026rsquo;t know everything that is needed. Sometimes the install/development instructions assume some base tools or packages that are not included in your development environment of choice.\nIn come devcontainers. Rather than having to search through the README for a project you are wanting to contribute to, installing several packages onto your machine, and troubleshooting when it doesn\u0026rsquo;t work, you can simply open the repository as a devcontainer and you are ready to start contributing. Have a project that requires several separate services (databases, middleware/api server, etc.)? Create a devcontainer using docker-compose and your development environment can launch an entire suit of containers exactly how you need them.\nSetup Install Docker To be able to use containers, we need a container manager: Docker.\nTo get Docker installed, simply follow their instructions\nInstall VS Code To get Visual Studio Code (VS Code) installed, simply follow their instructions\nAdd container remote extension Within VS Code, install the Remote - Containers extension\nClick the Extensions sidebar (or use the \u0026ldquo;Ctrl + Shift + X\u0026rdquo; shortcut) Search for ms-vscode-remote.remote-containers Click \u0026ldquo;Install\u0026rdquo; Test It Out Now that you are ready to use a devcontainer, it is time to test it out!\nYou can grab this blog and use it as the devcontainer to play with. Click on the bottom left in VS Code on the green arrows, find the Container remote section, and select \u0026ldquo;Clone Repository in Container Volume\u0026hellip;\u0026rdquo;, enter https://github.com/jhollowe/blog and hit enter.\nAfter a minute or so of downloading and building your development container, VS Code will be fully functional. You can use the included tasks (Terminal \u0026gt; Run Task\u0026hellip; \u0026gt; Serve) to build and serve the blog. The devcontainer includes everything needed to build the blog and run VS Code. 
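If you prefer working from a terminal, the same devcontainer definition can also be built and started outside the editor with the devcontainers CLI. A rough sketch, assuming Node.js is available and the repository is already cloned locally (the CLI is a separate install from npm, not something the VS Code extension sets up for you):\nnpm install -g @devcontainers/cli\ndevcontainer up --workspace-folder .\ndevcontainer exec --workspace-folder . bash\nThe last command drops you into a shell inside the running development container.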
VS Code will even pull in common configuration for tools like Git and SSH.\nModes There are several \u0026ldquo;modes\u0026rdquo; for how devcontainers store your files, each with its own benefits and drawbacks.\n\u0026ldquo;mode\u0026rdquo; Pros Cons container volume * fast\n* fully self-contained environment * hard to access files from outside container mounting a directory * easy to get files in and out\n* allows stateful local files * slow file I/O\n* adds/edits/deletes affect the source directory cloning a directory * as fast as a container volume\n* easy to get files into container\n* edits/deletes do not affect the source directory * hard to get files out of container ","description":"Setting up and maintaining a development environment is hard, especially when you need to destructively test features or libraries.","id":9,"section":"posts","tags":["development","containers"],"title":"Getting Started With Devcontainers","uri":"https://johnhollowell.com/blog/posts/getting-started-with-devcontainers/"},{"content":"For organizations with complex Active Directory (AD) environments, AD forests can allow flexibility in management and organization of objects.\nBasically, an AD forest allows multiple domains and trees of domains (subdomains) to access and have a shared configuration while still having separate domains with separate host servers.\nThey allow domains to trust and access each other while still maintaining separation and borders. I\u0026rsquo;ve seen this used to allow corporate and client domains to communicate or to have a development domain tree that trusts and can cross-talk with the production domain tree while still being separate (this is less common as dev domains are usually just subdomains within the production tree).\nResources\nhttps://en.wikipedia.org/wiki/Active_Directory#Forests,_trees,_and_domains https://ipwithease.com/what-is-a-forest-in-active-directory/ https://www.varonis.com/blog/active-directory-forest/ ","description":"Today I Learned about Active Directory Forests","id":10,"section":"posts","tags":["Active Directory"],"title":"TIL: AD Forests","uri":"https://johnhollowell.com/blog/posts/til-ad-forests/"},{"content":" Changing a user\u0026rsquo;s username on Linux requires no processes be running under that user. This makes sense, but what if we only have that user accessible through an SSH connection? What if we don\u0026rsquo;t want to allow external access to the root account? What if the root account doesn\u0026rsquo;t have a password?\nBackground I was recently spinning up a bunch of Raspberry Pis running Ubuntu 20.04 and some VPSes also running Ubuntu 20.04. I wanted to change the username on these nodes, but only really had access to the ubuntu (sudo) account.
While I know I could use a cloud-init file to create a user exactly how I want (more on that in a future post), I didn\u0026rsquo;t want to re-flash the nodes and was not able to add a cloud-init file before boot on the VPSes.\nThe Process Getting The Commands To Run So we can\u0026rsquo;t change the username of a user with running processes, but a SSH session and a bash shell both run under my user whenever I\u0026rsquo;m connected.\nThe main problem is executing a command from a user (and sudo-ing to root) while not having that user have a process running.\nUsing either of the commands below allows a command to be run as the root user which will continue running\n1 2 3 4 5 # interactive shell sudo tmux # non-interactive command sudo -s -- sh -c \u0026#34;nohup \u0026lt;command\u0026gt; \u0026amp;\u0026#34; Now that we can have a command running as root independent of the initiating user, we need to kill everything of the user so we can run usermod commands without difficulty. We kill the processes and wait a couple seconds for them all to terminate. Then we can run whatever commands we need.\n1 ps -o pid= -u \u0026lt;current_username\u0026gt; | xargs kill \u0026amp;\u0026amp; sleep 2 \u0026amp;\u0026amp; \u0026lt;command\u0026gt; What This Command Does ps lists the processes running on the system -o pid= selects only the process ID (pid) and does not create a header for the column (=) -u \u0026lt;username\u0026gt; selects only the processes running under \u0026lt;username\u0026gt; | takes the output of the previous command (ps) and makes it the input of the following command (xargs) xargs takes a line separated list (can change the separator) and turns them into arguments for the following command (-r tells it to do nothing if its input is empty) kill takes a pid (or list of pids) and terminates the process. While kill can send different signals to processes, this uses the default signal (TERM). \u0026amp;\u0026amp; runs the following command if the preceding command exited successfully (exit code 0) sleep 2 wait 2 seconds for the killed processes to terminate Now, we can get to actually changing the username!\nChanging The Username Now that we can run commands as root without our user running processes, we can proceed to change the username and other related tasks.\nThese commands assume you are running as root. If not, you may need to insert some sudo\u0026rsquo;s as necessary\n1 2 3 4 5 6 7 8 9 10 11 # change the user\u0026#39;s username usermod -l \u0026lt;new_username\u0026gt; \u0026lt;current_username\u0026gt; # move the user\u0026#39;s home directory usermod -d /home/\u0026lt;new_username\u0026gt; -m \u0026lt;new_username\u0026gt; # change user\u0026#39;s group name groupmod -n \u0026lt;new_username\u0026gt; \u0026lt;current_username\u0026gt; # replace username in all sudoers files (DANGER!) 
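# (added note, not part of the original commands) editing sudoers with sed is risky; after the sed edits below, consider validating the syntax before logging out, e.g. visudo -c -f /etc/sudoers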
sed -i.bak \u0026#39;s/\u0026lt;current_username\u0026gt;/\u0026lt;new_username\u0026gt;/g\u0026#39; /etc/sudoers for f in /etc/sudoers.d/*; do sed -i.bak \u0026#39;s/\u0026lt;current_username\u0026gt;/\u0026lt;new_username\u0026gt;/g\u0026#39; $f done Putting it all together When we put it all together (with some supporting script), we get change-username.sh as seen below:\n1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 #!/bin/bash currentUser=$1 newUser=$2 if [ $# -lt 2 ]; then printf \u0026#34;Usage:\\n\\t$0 \u0026lt;current_username\u0026gt; \u0026lt;new_username\u0026gt; [new_home_dir_path]\\n\u0026#34; exit 1 fi if [ $(id -u) -ne 0 ];then echo \u0026#34;Root permission needed for modifying users. Can not continue.\u0026#34; exit 2 fi newHome=\u0026#34;/home/$newUser\u0026#34; if [ $# == 3 ];then newHome=$3 fi echo \u0026#34;Changing $currentUser to $newUser\u0026#34; echo echo \u0026#34;Running this script has the possibility to break sudo (sudoers file(s)) and WILL kill all processes owned by $currentUser\u0026#34; echo \u0026#34;$currentUser will be logged out and will need to reconnect as $newUser\u0026#34; read -n1 -s -r -p $\u0026#39;Continue [Y/n]?\\n\u0026#39; key if [ $key != \u0026#39;\u0026#39; -a $key != \u0026#39;y\u0026#39; -a $key != \u0026#39;Y\u0026#39; ]; then echo \u0026#34;Stopping; no files changed\u0026#34; exit 2 fi # put the main script in /tmp so the user\u0026#39;s home directory can be safely moved tmpFile=$(mktemp) cat \u0026gt; $tmpFile \u0026lt;\u0026lt; EOF #!/bin/bash shopt -s extglob # terminate (nicely) any process owned by $currentUser ps -o pid= -u $currentUser | xargs -r kill # wait for all processes to terminate sleep 2 # forcibly kill any processes that have not already terminated ps -o pid= -u $currentUser | xargs -r kill -s KILL # change the user\u0026#39;s username usermod -l \u0026#34;$newUser\u0026#34; \u0026#34;$currentUser\u0026#34; # move the user\u0026#39;s home directory usermod -d \u0026#34;$newHome\u0026#34; -m \u0026#34;$newUser\u0026#34; # change user\u0026#39;s group name groupmod -n \u0026#34;$newUser\u0026#34; \u0026#34;$currentUser\u0026#34; # replace username in all sudoers files sed -i.bak \u0026#39;s/\u0026#39;$currentUser\u0026#39;/\u0026#39;$newUser\u0026#39;/g\u0026#39; /etc/sudoers for f in /etc/sudoers.d/!(*.bak); do echo \u0026#34;editing \u0026#39;\\$f\u0026#39;\u0026#34; sed -i.bak \u0026#39;s/\u0026#39;$currentUser\u0026#39;/\u0026#39;$newUser\u0026#39;/g\u0026#39; \\$f # TODO fix $f not getting the file path for some reason done EOF echo \u0026#34;Putting script into $tmpFile and running\u0026#34; chmod 777 $tmpFile sudo -s -- bash -c \u0026#34;nohup $tmpFile \u0026gt;/dev/null \u0026amp;\u0026#34; ``` \u0026lt;!-- markdownlint-disable-file --\u0026gt; requirements Command(s) Package bash bash ps, kill procps usermod, groupmod passwd sed sed xargs findutils ","description":"Changing a user's username on Linux requires no processes be running under that user. This makes sense, but what if we only have that user accessible through a SSh connection?","id":11,"section":"posts","tags":["sysadmin"],"title":"Change Username Without Separate Session","uri":"https://johnhollowell.com/blog/posts/change-username-without-separate-session/"},{"content":"One of the most important parts of a working cluster is the interconnection and communication between nodes. 
While the networking side will not be covered now, a very important aspect will be: passwordless SSH.\nInter-node SSH The first task to getting easy access between nodes is ensuring SSH access between all the nodes.\nWhile not necessary, I recommend adding all your nodes to the /etc/hosts file on each node. For example, the /etc/hosts file might look like\n1 2 3 4 5 6 7 8 9 127.0.0.1 localhost # The following lines are desirable for IPv6 capable hosts ::1 ip6-localhost ip6-loopback fe00::0 ip6-localnet ff00::0 ip6-mcastprefix ff02::1 ip6-allnodes ff02::2 ip6-allrouters ff02::3 ip6-allhosts to which I would add (using the actual IPs of the nodes)\n1 2 3 4 192.168.0.11 node01 192.168.0.12 node02 192.168.0.13 node03 192.168.0.14 node04 Automate adding to your hosts files 1 2 3 4 5 6 7 8 9 for node in localhost node02 node03 node04; do ssh $node \u0026#34;cat | sudo tee -a /etc/hosts \u0026gt; /dev/null\u0026#34; \u0026lt;\u0026lt; EOF 192.168.0.11 node01 192.168.0.12 node02 192.168.0.13 node03 192.168.0.14 node04 EOF done After this is added to your hosts file on all your nodes, from any node you should be able to ssh node1 from any of them successfully after entering your password.\nNOTE: if you have not configured static IP addresses for your nodes, any changes to their IPs will require you changing the hosts file on all your nodes. Passwordless SSH To be able to SSH between nodes without the need for a password, you will need to create an SSH key. This will allow SSH to work in scripts and tools (MPI) without needing user interaction.\nFirst, we need to create a key. There are multiple standards of encryption you can use for SSH keys. The default is RSA, but it is generally considered to be less secure than modern standards. Therefore, these instructions will show how to create a ed25519 key. This will work on your cluster, but some (very) old systems may not support ED25519 keys (RSA keys will generally work everywhere even though they are less secure).\nTo create a key, use this command on one of your nodes:\n1 ssh-keygen -t ed25519 -a 100 -f ~/.ssh/id_ed25519 -C \u0026#34;Inter-node cluster ssh\u0026#34; This article does a good job of breaking down what all the arguments are used for.\nNext, we need our nodes to trust the key we just created. We\u0026rsquo;ll start with getting the current node to trust the key.\n1 ssh-copy-id -i ~/.ssh/id_ed25519 localhost NOTE: If you have already setup NFS with a shared home directory, you don\u0026rsquo;t need to do anything further; the key is accessible and trusted on all the nodes. Now we can just copy these files to all the other nodes so that they can use and will trust this key.\n1 2 3 4 5 for node in node02 node03 node04; do # list all the nodes that should get the key ssh-copy-id -i ~/.ssh/id_ed25519 $node # you will need to enter your password for this step scp ~/.ssh/id_ed25519 $node:.ssh/ ssh $node \u0026#34;chmod 600 ~/.ssh/id_ed25519\u0026#34; # ensure the key is locked down so SSH will accept it. done And to make all the nodes trust each other\u0026rsquo;s fingerprints\n1 2 3 for node in node02 node03 node04; do scp ~/.ssh/known_hosts $node:.ssh/ done We can check that we can SSH into all the nodes without having to enter a password:\n1 2 for node in node2 node3 node4; do ssh $node \u0026#34;hostname\u0026#34; ","description":"One of the most important parts of a working cluster is the interconnection and communication between nodes. 
While the networking side will not be covered now, a very important aspect will be: passwordless SSH.","id":12,"section":"posts","tags":["SSH","cluster","networks"],"title":"Cluster SSH","uri":"https://johnhollowell.com/blog/posts/cluster-ssh/"},{"content":" So you want to build a Raspberry Pi cluster.\nThe first thing to do is determine the size of a cluster you want to build. You can go with any number greater than one, but I\u0026rsquo;ve found that 4-8 is a good sweet spot between too few nodes to get a real feel of cluster operation and too many nodes to manage and maintain. For this and following posts, I will be assuming a cluster of 4 nodes (node01 to node04).\nHardware To run a cluster you also need some supporting hardware, where N is the number of nodes (examples given as links):\nN Raspberry Pi 4 N Micro SD Cards (16GB or more preferred) 1 gigabit ethernet switch (at least N+1 ports) OR router with N LAN ports (see the Networking section below) N short \u0026ldquo;patch\u0026rdquo; ethernet cables Power Supply (choose one) N USB C power supplies N/4 4-port USB power supplies with N USB C cables N/4 BitScope Quattro Raspberry Pi blades and power supply 1 USB Drive [optional] 1 4-slot case (with heatsinks) [optional] 1 power strip [optional] While you can use older models of the Pi if you already have them, using the most recent version will provide the most performance at the same price. Just make sure you get power cables that are compatible with your nodes.\nYou can also use larger RAM versions, but any amount of RAM should work for a minimally functional cluster. The more memory on your nodes, the larger problems they can solve and more performant they can be (caches for network and local storage and a reduction in swappiness).\nPut together the nodes If you got the BitScope Quattro for power or a case for your Pis, you will want to to get your Pis in place. This is also a great time to put on any heatsinks you have for your Pis.\nI would also recommend taking this time to decide the identity of each node and labeling them with a number or other identifier. I\u0026rsquo;ve decided to use numbers to identify my nodes, so I will use a marker or label to indicate which node is which number. This makes troubleshooting easier later on.\nConnect the wires Once your Pis are all ready to go, we need to connect them to power and network. It is useful to connect power and network cables in the order of the Pis so troubleshooting is easier when something goes wrong. Be sure to make sure all the cables are fully inserted.\nNetworking Connections For networking, you can take two paths:\nUse just a switch and connect the cluster to your home network Use a switch and/or a router to create a dedicated sub-network for your cluster. (You can use a switch to connect more nodes to your router if you have run out of ports on it) I\u0026rsquo;ll be doing the second option as it give better separation from my other devices and allows me to set private IP addresses for my nodes regardless the IPs already in use on my home network.\nRegardless the path your choose, you will need to connect your switch or router\u0026rsquo;s WAN port to your home network so your cluster can access the internet and you can access your nodes. (You could also have your cluster completely air-gapped and use static IPs on the nodes, but not being able to download applications and tools is in my opinion not worth the effort).\nSoftware For this cluster I will be using Ubuntu. 
Canonical ( the company behind Ubuntu) has done a great job of ensuring Ubuntu is stable on Raspberry Pis (with the help of software from the Raspberry Pi Foundation) and has a 64 bit version available (unlike Raspberry Pi OS as of the time of writing). I will be using 20.04, but the latest LTS version should be fine.\nThere is already a great tutorial on how to install Ubuntu on a Raspberry Pi. Make sure to select the latest LTS version with 64 bit support. Also, we have no need to install a desktop, so you can skip that step.\nConnecting to the nodes If you followed the above tutorial, you should have the IP address of all your nodes. If you can\u0026rsquo;t tell which IP goes to which node, try unplugging the network cables from all but one node, follow the instructions, and repeat for all the other nodes. If you are using a router for your cluster, make sure you are connected to its network (its WiFi or LAN port) and not your home network as the router will block connections from your home network into your cluster network. (if you want, you can create a port forward on your cluster router for port 22 to your so you can SSH into)\nOnce you know what node is what IP address, connect to the first node (which we will use as our head node). Try running ping 1.1.1.1 to ensure your node can connect to the internet. Then follow the cluster SSH guide to setup SSH between all your nodes.\nStatic IP addresses No matter if you have a dedicated cluster network or it is connected to your home network, you should configure static IP addresses for all your nodes so their addresses will not change accidentally in the future.\nPackages In future posts we will install needed packages for configuring our cluster operation, but below are some useful packages that can help with troubleshooting and analyzing cluster performance.\nDon\u0026rsquo;t forget to sudo apt update to make sure you have the latest package database.\nhtop iftop iotop dstat pv ","description":"The basics of getting a cluster of Raspberry Pis powered on and running. Full cluster configuration in later posts.","id":13,"section":"posts","tags":["cluster","networks","hardware"],"title":"Basic Cluster Setup","uri":"https://johnhollowell.com/blog/posts/basic-cluster-setup/"},{"content":"Clemson\u0026rsquo;s School of Computing (SoC) is the place at Clemson where Computer Science (CPSC), Computer Information Systems (CIS), and Digital Production Arts (DPA) are located. Other computing departments (like Computer Engineering) also use some of the SoC\u0026rsquo;s systems. Below are some useful tips and tools for quickly getting going in the SoC.\nAccess Servers The access servers are the way you can access all the SoC computers from off-campus (without having to use the VPN). You can SSH into them and then SSH into other computers through access (or anything else you can do through SSH). You can connect to the access servers using ssh \u0026lt;clemson_username\u0026gt;@access.computing.clemson.edu (or just ssh access.computing.clemson.edu if you computer\u0026rsquo;s username matches your Clemson username). When you connect, you will see a list of lab computers that you can then connect to by using their name (e.g. ssh babbage1). You can also use access2.computing.clemson.edu if the main access server is down or overloaded.\nIf you are on campus, you can directly access the lab computers without the need to go through the access server. 
Simply use ssh \u0026lt;computer_name\u0026gt;.computing.clemson.edu while on campus (or VPN) and you can directly connect to the machine.\nNOTE: There is a limit in place on the number of connections for each user connecting to the access server. I\u0026rsquo;ve found it to be 4 connections. If you need more connections, consider using both access and access2 or using SSH Multiplexing. Files on the lab computers All the lab computers share your home directory. This means that if you write a file on one computer, you can access it on any other lab computer. This also means your settings for most programs will be the same on all the computers.\nThis also means you can access these files from your own computer as a network drive. Check out these instructions for more information on the subject (use the linux share instructions).\nSSH between computers SSHing between the lab machines can be a bit of a pain when you have to enter your password every time. It also makes it harder to write scripts that use multiple lab computers to work on rendering a project or running some processing. However, if you set up SSH keys on the computers, it allows the lab machines to connect to each other without the need for a password. And since the lab computers share files, once SSH keys are set up on one system, they will work on all the systems.\nThe process of making the keys we will use is fairly straightforward. You can check out more information on what these commands do if you are interested.\n1 2 ssh-keygen -t ed25519 -a 100 -f ~/.ssh/id_ed25519 -C \u0026#34;School of Computing\u0026#34; ssh-copy-id -i ~/.ssh/id_ed25519 localhost This will generate a key for the computers to use, and \u0026ldquo;install\u0026rdquo; it so they will accept connections from that key. Since all the computers have the needed files due to the shared filesystem, all the computers now trust connections from all the other computers.\nSnapshot folder Oh no! You just deleted all the files for your assignment! Not to worry.\nYour home directory (/home/\u0026lt;username\u0026gt;/) on the SoC computers is backed up for just such a problem. Within every folder in your home directory is a hidden folder named .snapshot. It will not appear in any listing of directories, but if you cd into it, you can access all the different backups that are available. You can ls ~/.snapshot/ to see all the different dates that have backups (there are hourly, daily, and weekly backups). These backup files are read-only, so you will need to copy them back into your home directory to be able to edit them.\nTo access and recover your files, you can either do\n1 2 3 cd ~ cd .snapshot/daily.1234-56-78_0010/path/to/your/files/ cp very-important-file.txt ~/path/to/your/files/ OR\n1 2 3 cd ~/path/to/your/files/ cd .snapshot/daily.1234-56-78_0010 cp very-important-file.txt ~/path/to/your/files/ Teachers\u0026rsquo; Office Hours While it isn\u0026rsquo;t really a technology in the SoC, your teachers are one of the best resources to gain knowledge and software development skills. After all, they aren\u0026rsquo;t called teachers for nothing.\nAll teachers are required to have office hours (and so are Teaching Assistants (TAs)). Make use of this time to get to know your teacher, ask questions, and learn more about topics that excite you.
It is also a good idea to start projects early (I\u0026rsquo;m not saying I ever did this, but it is what I should have done) so you can ask the teacher questions in office hours before everyone else starts to cram the assignment and office hours get busy.\nYOUR SUGGESTION HERE Is there something you really liked or have often used that you think I should add here or in another post? Get in contact with me and let me know!\n","description":"Clemson's School of Computing can be complicated. Here are some tips and tricks to get started quickly and make the most of the resources you have.","id":14,"section":"posts","tags":[null],"title":"Clemson SoC 101","uri":"https://johnhollowell.com/blog/posts/clemson-soc-101/"},{"content":" Clemson University’s computer labs store files across all the computers using network shares. You usually just access these shares on the lab machines, but you can also add the shares on your own computer as a network drive.\nThere are two main shares on campus: the campus share used by all the Windows (and Mac?) lab machines (e.g. in Cooper Library, Martin, etc.) and the School of Computing’s Linux systems. Both systems can be accessed in a similar way, but with different settings.\nTo access these network shares, you must either be on campus internet (WiFi or Ethernet) or have the Clemson VPN installed and activated on your device. See the CCIT guide for VPN access for more information. The following instructions assume you are using a Windows device to access the shares. Using the credentials as below, you can follow a guide for adding network drives on Mac OS X or Linux (Ubuntu)\nSteps Open File Explorer and go to \u0026ldquo;This PC\u0026rdquo;. Click \u0026ldquo;Map Network Drive\u0026rdquo; in the top ribbon. Choose what drive letter you want the share to appear as (it doesn’t matter what you choose for this; I used \u0026ldquo;Z\u0026rdquo; for this example) Linux Share Windows Share Enter \\\\neon.cs.clemson.edu\\home into the \u0026ldquo;folder\u0026rdquo; box. 5. Check both \"Reconnect as sign-in\" and \"Connect using different credentials\" so the network drive will automatically connect and you can use your Clemson credentials (rather than your local device’s username and password). Click \"Finish\". 6. Enter your University username (with @clemson.edu) and password. (You might have to click \"more choices\" in the login window to be able to enter a new username/password.) \u0026lt;img data-src=\u0026quot;https://johnhollowell.com/blog/posts/accessing-your-clemson-network-shares/neon_creds.png\u0026quot; alt=\u0026quot;example login credentials for neon.cs.clemson.edu\u0026quot; data-caption=\u0026quot;\u0026quot; src=\u0026quot;data:image/svg+xml,%0A%3Csvg xmlns='http://www.w3.org/2000/svg' width='50%25' height='' viewBox='0 0 24 24'%3E%3Cpath fill='none' d='M0 0h24v24H0V0z'/%3E%3Cpath fill='%23aaa' d='M19 3H5c-1.1 0-2 .9-2 2v14c0 1.1.9 2 2 2h14c1.1 0 2-.9 2-2V5c0-1.1-.9-2-2-2zm-1 16H6c-.55 0-1-.45-1-1V6c0-.55.45-1 1-1h12c.55 0 1 .45 1 1v12c0 .55-.45 1-1 1zm-4.44-6.19l-2.35 3.02-1.56-1.88c-.2-.25-.58-.24-.78.01l-1.74 2.23c-.26.33-.02.81.39.81h8.98c.41 0 .65-.47.4-.8l-2.55-3.39c-.19-.26-.59-.26-.79 0z'/%3E%3C/svg%3E\u0026quot; class=\u0026quot;lazyload\u0026quot; style=\u0026quot;width:50%;height:;\u0026quot;/\u0026gt; 7. Click \"OK\". Your School of Computing home directory should now appear under the drive letter you chose. NOTE: When adding new files via the network share, they are created with permissions defined by your umask. 
You can use chmod xxx \u0026lt;file\u0026gt; to change a files permissions to xxx (view a chmod guide for more information on the chmod command) Enter \\\\home.clemson.edu\\\u0026lt;username\u0026gt; where \u0026lt;username\u0026gt; is your university username. 5. Check both \"Reconnect as sign-in\" and \"Connect using different credentials\" so the network drive will automatically connect and you can use your Clemson credentials (rather than your local device’s username and password). Click \"Finish\". 6. Enter your University username (without @clemson.edu) and password. (You might have to click \"more choices\" in the login window to be able to enter a new username/password.) \u0026lt;img data-src=\u0026quot;https://johnhollowell.com/blog/posts/accessing-your-clemson-network-shares/home_creds.png\u0026quot; alt=\u0026quot;example login credentials for home.clemson.edu\u0026quot; data-caption=\u0026quot;\u0026quot; src=\u0026quot;data:image/svg+xml,%0A%3Csvg xmlns='http://www.w3.org/2000/svg' width='50%25' height='' viewBox='0 0 24 24'%3E%3Cpath fill='none' d='M0 0h24v24H0V0z'/%3E%3Cpath fill='%23aaa' d='M19 3H5c-1.1 0-2 .9-2 2v14c0 1.1.9 2 2 2h14c1.1 0 2-.9 2-2V5c0-1.1-.9-2-2-2zm-1 16H6c-.55 0-1-.45-1-1V6c0-.55.45-1 1-1h12c.55 0 1 .45 1 1v12c0 .55-.45 1-1 1zm-4.44-6.19l-2.35 3.02-1.56-1.88c-.2-.25-.58-.24-.78.01l-1.74 2.23c-.26.33-.02.81.39.81h8.98c.41 0 .65-.47.4-.8l-2.55-3.39c-.19-.26-.59-.26-.79 0z'/%3E%3C/svg%3E\u0026quot; class=\u0026quot;lazyload\u0026quot; style=\u0026quot;width:50%;height:;\u0026quot;/\u0026gt; 7. Click \"OK\". Your Windows home directory should now appear under the drive letter you chose. You now have access to your files as if they were just another drive in your computer. Do note that these drives will be significantly slower than your actual computer drives due to higher latency and lower bandwidth.\n","description":"Clemson University’s computer labs store files across all the computers using network shares. You usually just access these shares on the lab machines, but you can also add the shares on your own computer as a network drive.","id":15,"section":"posts","tags":null,"title":"Accessing Your Clemson Network Shares","uri":"https://johnhollowell.com/blog/posts/accessing-your-clemson-network-shares/"},{"content":" Hey There! I\u0026rsquo;m John. I enjoy coding and problem solving. On the side I do some photography and videography work.\nCheck out my main website for more information about me and to get in contact.\n","description":"","id":16,"section":"","tags":null,"title":"About","uri":"https://johnhollowell.com/blog/about/"},{"content":" I\u0026rsquo;m at my extended family\u0026rsquo;s house way out in the middle of nowhere; barely enough cellular connection for an SMS, let alone trying to use any data.\nThey have DSL, but they are so far out that the signal is poor and it also is horrible speed. The fastest I saw while I was there was 700Kbps.\nWhile it is always a shock to go from over 100Mbps to under 1Mbps, I think that we are in an age where low bandwidth is not a show-stopper. Now obviously, downloading large files and games is a lot more tedious, I have found the \u0026ldquo;set everything to download overnight\u0026rdquo; method works quite well.\nI think there are three main reason why you can do more with less bandwidth than ever before.\nCompression and Codecs We have reached the point where processing power is so cheap, most of the time everything else is the limitation. 
We are glad to spend some power and time compressing data if it means we have more storage space on our devices or use less data. Website analysis tools will now complain if a webserver doesn\u0026rsquo;t compress its responses with at least gzip.\nWe are (slowly) starting to use new video and audio codecs that compress the crap out of the video/audio stream. Many devices are even starting to have highly performant hardware acceleration for these formats so it doesn\u0026rsquo;t even cause high load or power draw on mobile devices. Services like YouTube automatically convert content to many different qualities and have algorithms to pick the best quality that you can support.\nCaches, CDNs, and Apps Every web browser has a cache. Many even have several tiers of cache to give good hit/miss ratios and speed. If you are going to Facebook, you really should only ever need to receive the logo, most styles, and even some content once. This not only helps on slow connections, but even on fast connections an additional resource request can take a (relatively) long time to do an entire TCP and SSL handshake transaction.\nA further performance increase can be gained through websites\u0026rsquo; use of CDNs for their libraries and assets. If you are loading jQuery, FontAwesome, or bootstrap from local, you are doing it wrong. Pulling these assets from a CDN not only reduces the load on your server and the latency of the client accessing the resource, but allows caching these common resources between sites. If you visit a site using version x of the y library and then visit another site that uses the same version of y, you should be able to cache the first request of that resource and reuse it for any subsequent pages in any site. You can only do this if you are using a CDN (and the same one, but realistically most resources either have their own CDN or use one of the most common CDNs that everyone else uses).\nAdditionally, the use of site-specific apps (while annoying) allows the apps to only pull new content and \u0026ldquo;cache\u0026rdquo; all the resources needed to display the app. This ensures that outside of app updates, almost all of the app\u0026rsquo;s traffic is the content you want to see (or ads sigh).\nMobile Focused Pages Thanks to the horrible practices of the Cellular Companies, anything that is loaded on a cellular connection needs to be small to fit within limited bandwidth and even more limited data caps. While I have a great distaste for the stupidity of Cell carriers, their limitations have forced (well, encouraged) developments in efficient compression and transmission of pages (as well as a lot of bad practices in lazy loading and obfuscating in the name of minifying). Most sites will load smaller or more compressed assets when they detect they are on mobile platforms.\nCaveats While I did \u0026ldquo;survive\u0026rdquo; on the limited connection, I knew it was coming and was able to prepare a bit for it. I downloaded a couple of additional playlists on Spotify and synced a few episodes of TV to my phone from my Plex. However, I did not even use these additional downloads. I used the podcasts I had previously downloaded and even downloaded an additional episode while there. The ability in most apps to download content makes even a trickle of internet enough to slowly build up the content you want.\nI had also recently reset my laptop and had to download FFmpeg while there. It took a few minutes, but it didn\u0026rsquo;t fail.
I did want to do some complex computing while there, but since most of what I do is on other computers (servers, remote machines, etc.) it was incredibly easy to do what I wanted to do through an SSH connection to a datacenter. This is cheating a little bit but really is not out of the ordinary; even on fast internet I would SSH out to do things I didn\u0026rsquo;t want or couldn\u0026rsquo;t do on my device (thanks Windows). This is not that different from devices like Chromebooks which almost entirely run remotely and require an internet connection to function (or function with all features).\nThis was also a family gathering, so I didn\u0026rsquo;t spend much time on the internet. I could quickly google the answer to win an argument and that was all I needed.\nConclusion Slow internet is still a pain, but I\u0026rsquo;ve grown to appreciate its limitations and work around them. Several trends in computing and content delivery in recent years have made slow internet more bearable. I won\u0026rsquo;t be giving up my high-speed internet any time soon, but slowing down and disconnecting a bit is a nice change of pace in this time where everything has to happen online.\n","description":"While it is always a shock to go from over 100Mbps to under 1Mbps, I think that we are in an age where low bandwidth is not a show-stopper.","id":17,"section":"posts","tags":["web","life","opinion"],"title":"A Trickle Is Better Than Nothing","uri":"https://johnhollowell.com/blog/posts/a-trickle-is-better-than-nothing/"},{"content":"2021. A new year; a new start.\nI\u0026rsquo;ve wanted to start a blog for a while, and I thought I might as well start it on the first of the year. I think I finally have enough things I want to talk about that a blog is worth the effort.\nWhat\u0026rsquo;s in a Name? So why the name \u0026ldquo;/dev/random\u0026rdquo;? Well, I\u0026rsquo;m a geek and this blog will be about anything. I don\u0026rsquo;t want to confine this blog to any one subject (including to just tech) and I want the entirety of the blog to be representative of that. It also gives me the opportunity to have a punny subtitle, which I am always appreciative of.\nSo\u0026hellip; Why? This blog is mostly a place for me to put information for my future self and others. Don\u0026rsquo;t expect any deep, rambling prose. I\u0026rsquo;m not a spectacular writer and there are many things in my life that don\u0026rsquo;t merit blogging about. However, I have a very wide range of knowledge which I often will forget by the next time I need to use it. This gives me a way to record my experiences and experiments in a public place to which I can refer others. This blog is also an experiment, how meta is that?\nWhen can I get more of this great content? I would like to at least work on this blog every day. That doesn\u0026rsquo;t mean a new post every month; longer and more detailed posts will take me a bit longer. I might hold a post so a whole series can be released together. I might get bored and never create another post. Who knows?\n","description":"I've wanted to start a blog for a while, and I thought I might as well start it on the first of the year. I think I finally have enough things I want to talk about that a blog is worth the effort.","id":18,"section":"posts","tags":null,"title":"And So It Begins","uri":"https://johnhollowell.com/blog/posts/and-so-it-begins/"}] \ No newline at end of file +[{"content":" I\u0026rsquo;ve had my Steam Deck (256GB version) for a bit over 6 months now and I love it!
Being a mostly keyboard-and-mouse gamer, that was a bit of a surprise to me.\nUnboxing Experience Opening the Steam Deck was an easy process. The packaging has fun Valve-y designs and contains the contents well without using unneeded extra packaging.\nSetup (or lack thereof) The initial setup process was as simple as possible. Sign into your Steam account and you are pretty much good to go!\nPlaying Games Playing games just works. With the Proton layer, most Windows games fully work. Even some games with Anti-Cheat software (I\u0026rsquo;ve tried Fall Guys) will work on the Steam Deck.\nDownloading Games Steam recently added a feature which is fantastic for the Deck. If there is a computer running Steam on the local network which already has a game downloaded, Steam will automatically download to your device from the other device, preventing the game from having to be downloaded from the internet and being able to use your much likely much faster LAN. Either machine can cancel this and force the downloading device to pull from the internet, but it is a great feature to be able to pull a game onto the Steam Deck at full 1 Gbps or 2.5 Gbps speed with the use of a USB-C ethernet adapter.\nPerformance It plays the games I want to play (even emulators of more recent consoles) at a stable 40-60 fps with medium-high quality settings. Check out proper reviewers for details, quantitative performance metrics. For more demanding games, I can play them at home by streaming them from my desktop. The Deck can play more demanding games if you lower quality settings and are okay with reducing your target framerate to 30-40 FPS, but it really isn\u0026rsquo;t well suited for it and performs much better in the lighter games at which it excels.\nBattery Life Admittedly, I\u0026rsquo;ve mostly used the Deck at home or in a location with easy access to power, so the battery life of the Deck has not been super important to me. However, for most of the light games that I play on it, the Deck gets 3-4 hours of play time, more than I really want (should) to play in a single sitting. Add an external battery pack and you can game for even longer.\nObviously with a triple A title, the battery is not going to last that long, the Deck will crank out so heat, and the fan will kick into high gear. But I rarely play these on the Deck anyway as I have a gaming desktop for these types of games.\nExploring the Controls This is by far the best part of the Steam Deck in my opinion. There are so many controls and Steam allows you to rebind and configure the inputs in just about any way you want. All the controls are comfortable for my medium-large hands to use. Especially for games which include Steam Input support (mostly just Valve games as of now), the ease of remapping inputs and creating complex input schemes is fantastic. The ability to tie in the gyroscope, back buttons, and touch pads makes it easy to setup a very fluid and natural control. And the ability to share controller profile means that the community (and you!) can share the tweaks that work the best for them and game authors can create custom controller mappings for their games. The control customization deserves its own entire post.\nThe main thing I appreciate is the back L4, L5, R4, and R5 buttons. Since basically no game uses them, they can be mapped to duplicate other buttons. I\u0026rsquo;ve found it useful on a lot of games to remap these buttons to ABXY so that I can use those buttons while using the right stick. 
For emulated games, I\u0026rsquo;ve really liked mapping the rear buttons to emulator commands, like the \u0026ldquo;Hotkey\u0026rdquo; button for RetroArch. This makes it much easier to perform emulator commands like creating a save point or pausing the game.\nUsing the Desktop Mode While I haven\u0026rsquo;t done a lot of different things in the Desktop Mode, there are times where it is invaluable. Since the Deck is a full Linux computer, you can go in and adjust files, add mod files to games, or just use it as a desktop. Desktop Mode also allows you to install applications not in the Steam store, like indie games, Discord, or anything that works on Linux. The read-only file system can be an issue, but most everything is a flatpak now, so that is less of an issue.\nFavorite Software These tools and apps greatly improve the experience or functionality of the Steam Deck.\nEmuDeck I\u0026rsquo;ll make a more in-depth review of it, but if you like playing old console or arcade games, EmuDeck sets up emulators for just about any console or system you can think of. It streamlines the setup of tools like RetroArch and EmulationStation to allow you to seamlessly play any old game for which you own (or can \u0026ldquo;acquire\u0026rdquo;) the ROMs. It can even add your games as game entries to Steam, allowing you to directly launch an emulated game from Steam.\nDecky Decky adds so many great plugins which can do everything from syncing your play status to Discord to adjusting detailed performance/clock/TDP tunables to controlling how fast/much the Deck can charge.\nHere are the plugins I currently have installed\nEmuDeck Hotkeys (preinstalled by EmuDeck) Network Info AutoSuspend PowerTools KDE Connect KDE Connect is a great tool for many uses, but I have found it the most useful for using another computer/phone as the keyboard for typing in complex passwords. It is super useful if you use a password manager. You can use a device which is already logged into your password manager, copy the credentials for whatever you are logging into on the team Deck and paste it via KDE Connect.\nFavorite Games These games are a combination of games I already enjoyed playing on desktop and games that where specifically reccommended to me to play on the Steam Deck. Enjoy them all!\nVampire Survivors Sky Rouge Fall Guys Kalimba A Short Hike The Future I will likely swap out for an OLED Steam Deck at some point, but more out of a desire to have the latest shiny thing and not due to the Steam Deck LCD being bad. I\u0026rsquo;ve really liked the Steam Deck and am happy I got it. It has opened a whole new world of simpler games to me and allowed me to come back to retro games I played as a kid.\n9/10 - Love it and the gaming it has given me. Would love it to somehow break the laws of physics and get more performance and longer battery without changing its size.\n","description":"","id":0,"section":"posts","tags":["hardware","gaming"],"title":"6 Months with the Steam Deck","uri":"https://johnhollowell.com/blog/posts/steamdeck/"},{"content":" The Tale Begins There I was, triaging a new issue that came in. A Linux VM running in the cloud was hanging when we started trying to run our workload on it. Huh, there was no output at all from the python script; it didn\u0026rsquo;t even create its log file, one of the first things it should do. 
Logging into the cloud instance, I looked around and noticed there was a python process running for the script we started, so the connection to the host and creating the python process at least worked. Well, since it didn\u0026rsquo;t work the first time, I killed the process and tried running the same command manually to see if there was an issue with the setup of the process. Aaaannnndddd it hung. But it doesn\u0026rsquo;t hang with the exact same NFS mount and AMI (root disk image) in a different cloud account we use.\nWell, this is interesting. Okay, let\u0026rsquo;s just look at the script we are running. Hung. Welp, I guess it is time for the good old turn-it-off-and-on-again fix. Now let\u0026rsquo;s look at the script. That seems fine. Let\u0026rsquo;s look at the python executable binary we are running. Hung. Uh, okay. Let\u0026rsquo;s check the script again. Hung. Well it looks like an NFS issue. Wireshark Time!\nAfter a bunch of test reads and write to the NFS mount with Wireshark slurping up packets, it looks like the client sends out read requests and the server never responds. The TCP connection retransmits the un-ACK\u0026rsquo;d packets until the TCP session times out, sends a RST, and sends the read request again.\nAfter inspecting the traffic in the AWS flow logs and in the cloud-to-on-prem firewall, it seems that all the traffic is correctly making it from the cloud client to the on-prem NFS server. So, what do we do now?\nAfter a bunch of additional tests, I ran a test of incrementally increasing the size of a file being written one byte at a time. The writes started to fail around 1300 bytes. Looking at the traffic in Wireshark, these write requests approached 1500 bytes. While both the server and client were using jumbo frames (9000 MTU), it is possible there is a 1500 MTU link somewhere between these two hosts.\nDiscovering the Path to a Fix Collaborating with our cloud operations team, we confirmed that the Direct Connect between the cloud and on-prem did have a 1500 MTU. However, this did not explain why the client/server could not use the standard Path MTU Discovery (PMTUD) to detect the smaller link and reduce the effective MTU to the lowest MTU along the path.\nPMTUD activates when a frame which is too large for a link is sent with the Don\u0026rsquo;t Fragment (DF) flag set. When network gear receives a frame too large for the MTU of the next hop, it will either fragment the packet or if the DF flag is set, return an ICMP error \u0026ldquo;Fragmentation Needed and Don\u0026rsquo;t Fragment was Set\u0026rdquo; packet to the sender and drop the packet. Testing in the other AWS account, this worked correctly and the TCP session downgraded to a 1500 MTU (technically the MSS was reduced to 1500 not the MTU, but that is a whole other topic). However for some reason in the original account, the session did not reduce to 1500. Comparing a packet capture from both accounts, I noticed that there was no ICMP error response in the broken account.\nAWSucks After much back-and-forth with our cloud ops team, we found that in the broken account there was an additional layer on top of the Direct Connect. The AWS Transit Gateway not only has a maximum MTU of 8500, but also does NOT return an ICMP \u0026ldquo;fragmentation but DF\u0026rdquo; error. 
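For anyone retracing this kind of hunt, here is a hedged sketch of how one might confirm a PMTUD black hole from the client, along with the kernel tunable this post eventually lands on (the hostname is a placeholder; the sysctls are standard Linux tunables):

```bash
# Probe the path with the Don't Fragment bit set. A healthy path returns
# "Frag needed" for oversized probes; a black hole just times out silently.
ping -M do -c 3 -s 8972 nfs-server.example.com  # 9000-byte frames (8972 + 28 bytes of headers)
ping -M do -c 3 -s 1472 nfs-server.example.com  # fits within a 1500 MTU path
tracepath nfs-server.example.com                # reports the path MTU it can discover hop by hop

# The workaround described later in this post: packetization-layer PMTUD,
# which lets TCP probe for a working segment size when ICMP errors never arrive.
sysctl -w net.ipv4.tcp_mtu_probing=1            # 1 = probe after a black hole is detected
sysctl -w net.ipv4.tcp_base_mss=1024            # starting MSS for probing (value illustrative)
```

To persist the sysctls across reboots, they would go in a file under /etc/sysctl.d/ as usual.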
So when the client or server sends a packet larger than the MTU of the Transit Gateway, the TG drops the packet without informing the sender of why the packet is being dropped, and the sender continues to retransmit the packet for which it has not received an ACK, thinking it was just randomly dropped.\nFinding Another Way So PMTUD won\u0026rsquo;t work; great. And we can\u0026rsquo;t reduce the client\u0026rsquo;s MTU to 1500 as there are workloads running on it which must have jumbo frames. Thus began a flurry of research resulting in me learning of Linux\u0026rsquo;s Packetization Layer PMTUD. Using the net.ipv4.tcp_mtu_probing kernel tunable, we can enable MTU (really MSS) size discovery for TCP sessions.\nHow It Works When the sender sends a packet which is too large for a link in the path of an active TCP connection, the too-large packet will be dropped by the network and the sender will not receive an ACK from the receiver for that packet. The sender will then retransmit the data on an exponential backoff until the maximum retransmit count is reached. The sender will then send a RST and try a new TCP session (which, if tried with the same size packet, will just repeat the cycle).\nThe tcp_mtu_probing functionality takes over once the standard TCP retransmit limit is reached. With tcp_mtu_probing enabled, the kernel\u0026rsquo;s network stack splits the offending packet into net.ipv4.tcp_base_mss sized packets and sends those packets instead of the too-large packet. For subsequent packets, the network stack will attempt to double the current packet size until a packet again goes unacknowledged. It then uses this largest working packet size for all future packets in the TCP session. Linux 4.1 improves on this functionality by using a binary search instead of repeated doubling of the MSS. The initial reduced packet size starts at tcp_base_mss and then binary searches for the largest functioning MSS between tcp_base_mss and the MTU of the interface passing the traffic.\nA great article digging deeper into this is Linux and the strange case of the TCP black holes\nConclusion While the ideal solution would have been for AWS to fix their broken, non-compliant network infrastructure, it is unlikely they will ever fix this. Using a solution built into the Linux kernel instead allows the continued use of jumbo frames for cloud-local traffic while preventing traffic over the Transit Gateway from breaking due to large packets.\n","description":"A simple issue at work with cloud hosts not being able to access an NFS mount on-prem turned into a multi-month bug hunt which ended with finding a low MTU network path and an AWS \"feature\" (pronounced bug)","id":1,"section":"posts","tags":["cloud","AWS","networks"],"title":"Unraveling the Mystery of NFS Hangs, or How The (Hybrid) Cloud is a Pain","uri":"https://johnhollowell.com/blog/posts/aws-tg-mtu/"},{"content":" I\u0026rsquo;ll start off by saying I love my Framework laptop. The transition from my old 15\u0026quot; laptop to this 13\u0026quot; Framework has been a lot more seamless than I thought it would be. It has worked perfectly for everything I\u0026rsquo;ve put it through.\nMy Experience With My Framework Battery Life Even with the recently-replaced battery in my old laptop, my Framework has a much longer battery life. Likely a combination of both the battery and processor, I\u0026rsquo;m able to get many hours of even a demanding workload.
I\u0026rsquo;m able to have Discord open in a video call for hours while having many other browser tabs or games running without the worry of where my charger is.\nLap-ability The one loss from moving from a 15\u0026quot; laptop to a 13\u0026quot; laptop is the lessened ability to use it effectively on my lap while connected to cords. The smaller size of the 13\u0026quot; means that it sits more between my legs rather than fully on top of my legs. This is normally fine, especially since the fan vents to the rear rather than to the right or left so my legs aren\u0026rsquo;t getting blasted with heat, but it does make having cables connected to the ports difficult and strains the cables\u0026rsquo; connectors.\nThankfully, I typically only need to have my charger connected to my laptop, so I found a solution. Since my charger is a type-c charger, I can just pop out one of my modules and directly connect the charger\u0026rsquo;s cable to the deeply-inset type-c port behind where the module would go. This means only the small cable is pressed against my leg and no strain is put on the cable.\nCharging Fan One thing that has disappointed me about my Framework is the leaf blower it turns into when plugged in to charge (when the battery is discharged). I think a combination of moving from the \u0026ldquo;Better Battery\u0026rdquo; Windows power profile while on battery to \u0026ldquo;Best Performance\u0026rdquo; when plugged in and the extra heat from the high-speed charging capabilities means the fan kicks up to be quite loud when plugging in. I have not played around much with power profiles to try to reduce this, but it typically only lasts for a short time and I almost always prefer the better performance over a bit of ignorable noise.\nPhysical Camera/Microphone Switches I didn\u0026rsquo;t think this would be a big thing, but it is really nice to have confidence that at the hardware level, my mic and camera cannot be accessed.\nE Cores As I have a wide, eclectic collection of software I run on a regular basis, I was pleased to not run into many issues with programs not properly understanding/scheduling with the efficiency cores on the 12th gen Intel processor. There are some tools (e.g. zstd) which don\u0026rsquo;t properly detect the cores to use. However, this could be due to running some of these quirky tools in WSL and how some tools try to detect hyper-threading to schedule themselves only on physical cores.\nFOMO? Now that 13th gen Intel and AMD mainboards have come out for the 13\u0026quot; Framework, do I feel like I am missing out or should have waited? Not at all. If I had needed a laptop after the 13th gen came out, I would definitely have chosen the 13th gen mainboard, but I am happy with what I have. Especially since I rarely have a use case for a high-performance laptop, I\u0026rsquo;m very comfortable with my 12th gen.\nPart of the appeal of the Framework is that I don\u0026rsquo;t have to have as much of a fear of missing out. The new laptops all have the same hardware outside of the mainboard. If I want a 13th gen laptop, I can easily upgrade my existing laptop to the 13th gen and get a 12th gen computer to use as a server, media PC, etc.
And if I keep my laptop for long enough that the hardware is wearing out, I can replace the parts that are broken (or of which I want an improved version) and keep all the remaining parts, reducing the cost of repair and keeping still-good parts from ending up e-waste.\nAs for regrets getting the Framework rather than some other newer system, I have none. I have not stayed as up-to-date with the laptop scene since I\u0026rsquo;m not currently in need of a new one, but the systems that I have seen have not presented any better features or performance for my use cases. Some of the new Apple laptops have been interesting to follow, but I\u0026rsquo;m not a big fan of many aspects of Apple\u0026rsquo;s hardware and ecosystem and I still do come across some software that is not compiled for ARM (a big one being Windows). I love ARM and use it quite a bit in my homelab (mostly Raspberry Pis), but for my main system is just not quite universal enough for a daily driver.\nConclusion Overall, I\u0026rsquo;m very happy with my Framework and would absolutely recommend it to others. Yes, it is more expensive than another laptop with comparable specs, but the Framework\u0026rsquo;s build quality is supreme. If your use of laptops is more disposable, the Framework may not be for you (and that is okay), but I value the goals of the Framework and truly expect to get my money\u0026rsquo;s worth out of the repairability and modularity of the Framework.\n","description":"After living with the 13\" Framework laptop and releases of new specs for the 13\" and plans for the 16\", I've got some thoughts on my Framework","id":2,"section":"posts","tags":["hardware","life"],"title":"Framework Followup","uri":"https://johnhollowell.com/blog/posts/framework-followup/"},{"content":" I recently upgraded my laptop to a Framework laptop since my old trusty laptop\u0026rsquo;s screen cracked and a replacement screen cost as much as some new laptops. These are my initial impressions of the laptop\u0026rsquo;s build, performance, and usability.\nUse Case I have a bit of a minimal use case for my laptop. Since I have a powerful desktop and a fairly performant phone, I don\u0026rsquo;t need my laptop to be a do-everything device. If I need to do something that requires a lot of performance (gaming, heavy development builds, video transcode, etc), I will use my desktop. If I need to quickly do something, I will use the phone that is always in my pocket or on the desk next to me. My laptop fulfils three main functions: portable large-screen remote access to desktop, couch web-browsing and light development, and media consumption while on the road.\nDesktop Remote The main place I will install games and software, store some files, and do high-performance tasks is on my desktop. I often will need or want to do something on my desktop while not sitting at my desk. Be it from a few meters away on the couch or thousands of kilometers away, I will often remote into my desktop from my laptop. There are not really any specific requirements, but a large screen, enough CPU performance to decode the remote screen stream, and good enough networking to get the connection through. This is honestly the lowest performance need for a laptop, but having hardware decode for whatever remote solution would provide long battery life for this use case.\nCouch Computer This is the middle-of-the-road use case in terms of requirements. It is mostly web browsing, some light video consumption, and low-demand development/writing (like writing this blog). 
I use VS Code devcontainers for just about everything, so being able to run docker and VS Code well is a must. Mostly, this presents as having enough memory for the containers, VS Code (thanks memory-hungry electron), and all the extensions I typically use. Occasionally, having some performance is nice to be able to build a new dev container (fast network to pull dependencies, fast CPU to decompress image layers and compile dependencies, and mostly fast disk to support fast installation of packages, create new layers, etc.) and makes getting started contributing to a new project incredibly streamlined.\nOn-the-road System This is the most taxing use case that I have for my laptop. This is everything from Couch Computer and more. Some video transcoding (compressing) of footage I\u0026rsquo;ve taken, some light (and not-so-light) gaming, and occasionally some heavy network traffic (using my laptop as a portable NAS or sneaker-net).\nThis is also the use case where the connectivity of the laptop is the most important. From hooking into projectors using HDMI, to needing ethernet for some network troubleshooting, to flashing a Raspberry Pi or reading images from an SD card, the most variability in how I interact with my computers is on the road. The ample expansion/connectivity modules make it easier to have the right connector where I want it, when I want it. Also, the ability to move my ports around mean I will never have to do the awkward my-HDMI-is-on-the-wrong-side-for-this-podium dance again. Further, having 4 thunderbolt USB-C ports means that even if there is not an official module for what you want, you can easily connect a dongle or even make your own modules. Always in the data center? make yourself an RS-232 serial port module for interacting with all the serial consoles on your hardware.\nDesktop Replacement As a bonus use case, I will very, very rarely use my laptop at my desk instead of my desktop. My work laptop usually sits on my desk, plugged into a thunderbolt dock connected to all my peripherals and monitors. Every once in a while, I might use this setup with my personal laptop in this setup if I was working on some project on my laptop that would be too cumbersome to move to my desktop but might benefit from the extra monitors and peripherals.\nBuild Form Factor The Framework is a 13.5\u0026quot; laptop with a 3:2 screen ratio. While I\u0026rsquo;m used to my previous laptop\u0026rsquo;s 15\u0026quot; form factor, the added height of the Framework\u0026rsquo;s screen and higher resolution maintains a good amount of screen real estate. It also provides a more compact body which is more portable and takes up less space on a desk. Weighing in at 4.4 lb, it isn\u0026rsquo;t a light laptop, but the incredibly sturdy chassis and zero deck flex on the keyboard are reason enough for the bit of weigh.\nPower and Battery It uses Type-C (USB-PD) for charging via any of the 4 expansion ports when a USB-C expansion module is installed (or really you can directly connect to the type-c ports at the back of the expansion ports). This allows charging from either side of the laptop which brings a great versatility. While writing this, the idle power draw was ~15W at a medium-low screen brightness. Running a benchmark, the draw from the USB-C charger reached ~62W (on a 90W charger).Charging from 0% to ~80% while powered off averaged around 40W. 
Charging from ~85% to 100% averaged around a 30W draw (~10W to the battery and ~15W to the idle running system).\nKeyboard The keyboard is easy to type on with ample key spacing and a sensible key layout. I wrote this whole post on the Framework\u0026rsquo;s keyboard. The keys have good stabilization and have a comfortable travel distance. The palm rest areas beside the trackpad are large enough to use and the keyboard is centered on the chassis so one hand/wrist is more extended than the other.Overall, an easy keyboard on which to type.\nTrackpad Not much to say about the trackpad, and that is a good thing. The trackpad is a nice size: not too small to be useless and not too large to be cumbersome to use. It has a nice tactile click when pressed (which I rarely notice since I mostly tap-to-click rather than use the actual displacement button method of clicking) and a smooth surface which is easy to swipe across. The trackpad\u0026rsquo;s palm rejection while typing is very good, but the button still functions while the movement is disabled. If you place a lot of weight on the insides of your hands while typing, you may need to be careful to not push too hard on the trackpad while typing. The typical multi-touch gestures work correctly and smoothly zoom, swipe, and the rest.\nSpeakers The speakers on the Framework have impressed me so far. I will use earphones/headphones over speakers most of the time, but the speakers are much better than my previous laptop\u0026rsquo;s speakers and are a nice, usable option. They are quite loud and even at 100% there is no distortion, clipping, or chassis rattle. Although the speakers are down-firing at the front (user-facing side), they are on the angled bevel of the side so even sitting atop a flat surface the speakers fire out and around the chassis to provide a well-balanced sound profile.\nPerformance CPU My Framework performs well. I got the i5 12th gen variant (i5-1240P, up to 4.4 GHz, 4+8 cores) as a low power yet still performant portable system. Following on the Desktop Remote section above, I very rarely need my laptop to be very performant. What I want most of the time is something that can boost to do a little bit of compute while mostly being a power-efficient system that can run web apps, remote desktop software, and YouTube. The system excels at these tasks. I\u0026rsquo;ll leave the hard numbers and comparisons to benchmark publications, but the system has done everything (within reason) I\u0026rsquo;ve thrown at it.\nMemory While it may seem basic, the ability to have socketed memory can\u0026rsquo;t be ignored in modern laptops. Being able to upgrade and/or expand your system\u0026rsquo;s memory down the line is one of the simplest ways to give an old machine a boost. However, a lot of new machines are coming out with soldered memory that can\u0026rsquo;t be upgraded, expanded, or replaced. The availability of 2 SODIMM slots for memory is a great feature for repairability and the longevity of the system.\nCooling and Fan One disappointing aspect of the Framework is its cooling system and fan. When idle, the fan is inaudible and the user-facing components stay cool. However, even when idle the bottom chassis panel gets slightly too warm to hold for a long time. While on a desk, this is not an issue but when on a lap (where the lap in laptop comes from), the heat it a bit too much for bare skin contact and going hand-held with one hand on the bottom for support is not comfortable to hold. 
However, even when running full-tilt under a stress test, the top (keyboard, trackpad, and palm rest areas) stayed cool and comfortable.\nThe cooling fan, when going at full speed, is loud but does an adequate job of keeping the internals cool and preventing drastic thermal throttling. A concern I had heard from others was with the vent being in the hinge and concerns over the cooling capacity of the system while the screen is closed. After some tests, the hinge cover is shaped to direct the exhaust air out the bottom of the hinge which gives enough airflow to keep the system cool.\nWiFi 6E While I currently don\u0026rsquo;t have any other wifi gear which supports 6E to test against, I believe 6 GHz is going to be super useful in the coming years and having a computer that already supports it is a great feature. And even if it didn\u0026rsquo;t have a 6E chip in it, the Framework\u0026rsquo;s wifi is socketed which allows for future improvement.\nFor what I can test, the Framework\u0026rsquo;s WiFi works well. It gets the maximum speed my Access Point (AP) supports and has very good range. I haven\u0026rsquo;t noticed any difference it reception between different orientations of the laptop, so the antenna placement seems to be the best it can be.\nUsability I/O The ability to select the I/O that your laptop has is one of the obvious usability features of the Framework. The ability to have up to 4 USB-C thunderbolt ports is impressive and the various modules to adapt those ports into other common ports is fantastic. My favorite ability so far is just having a USB-C port on both sides of the laptop. When I was searching for a new laptop, few had a Type-C port and even fewer had at least one on both sides. The Framework works well with all the USB-C and thunderbolt docks and dongles that I have used with it.\nBattery Another great usability feature is the long battery life. The combination of an efficient processor and a high-capacity battery makes the Framework able to stay running for hours.\nSecurity, Privacy, and Webcam For security and privacy, the Framework has several great features. For signing in (on supported OSes), you can use the fingerprint sensor integrated into the power button for authentication. While my previous laptop had a Windows Hello capable camera, the fingerprint reader is just about as easy to use. The fingerprint reader works well\nOn the webcam, the Framework has physical toggles to disable the webcam and disable the microphone (independently). They toggles have a nice red section visible when disabled and the camera has a light when it is active. It is really nice to have physical switches for the cameras, and since I am using the fingerprint sensor for login (instead of the facial recognition of my previous laptop), I can leave the camera disabled most of the time. The camera is 1080p and does a good enough job with challenging situations like low light and high contrast environments.\nScreen The screen is a 2256 x 1504 (3:2) glossy screen. The extra screen real estate is nice for tasks that can make use of the extra vertical space, media consumption which is mostly 16:9 or wider leaves unused space on the screen. The maximum brightness of the screen is quite bright and is easily visible in direct sunlight. The screen also has a light detector which can be used for automatic screen brightness adjustments. However, at least in Windows, the auto brightness works well but causes a massive jump in brightness when adjusting to above ~50%. 
Due the the glossy, highly-reflective screen, bright sun from behind makes it hard to read the screen even at maximum brightness. I\u0026rsquo;m planning to investigate what matte screen films/protectors are available that I could use to make the screen less reflective. As I will very rarely use my laptop for very color accurate uses, a matte screen would be better.\nWindows Install and Drivers One cautionary note revolves around the newer, less used components in the Framework. I installed Windows 10 and out of the box, the trackpad and WiFi did not work. I had to use an Ethernet dongle (since I did not get the ethernet Framework module) to download the driver pack from Framework\u0026rsquo;s website. It did not automatically get the drivers from Windows Update like most other firmware/drivers. I also tried Ubuntu 22.04, and while it had fully functional WiFi and and trackpad out of the box, it did not properly adjust the screen backlight based on the function keys (but was able to control the brightness manually using the OS settings slider).\nOverall Impressions Overall, I really like my Framework laptop so far. I did not think I would like the smaller size, but setting the display scaling to lower than the default of 200% (I\u0026rsquo;m testing between 175% and 150%) give more than enough screen space for task I need to do on my laptop. After writing this whole post on the keyboard both on a couch and a desk, it is comfortable to type on and quick to pick up touch typing. It is small and portable while having good performance, battery longevity, and screen real estate. I wish it was a bit bigger as I like a laptop with a larger screen, but for the chassis size the screen is nearly 100% of the size of the laptop footprint. With a 11-in-1 USB dongle, it has as much or more connectivity than my desktop. It works flawlessly with thunderbolt docks (at least the ones I have tested). The first install of Windows 10 was a little painful having to install the driver bundle, but that is a small, one-time price to pay for a nice machine on an old OS.\n9.5/10. Would recommend.\n","description":"I recently upgraded my laptop to a Framework laptop since my old trusty laptop's screen cracked and a replacement screen cost as much as new some laptops. These are my initial impressions of the laptop's build, performance, and usability.","id":3,"section":"posts","tags":["hardware","life"],"title":"Framework First Impressions","uri":"https://johnhollowell.com/blog/posts/framework-first-impressions/"},{"content":" Trying to boot off an NVMe drive on older hardware can cause some issues. If you are running an older BIOS/UEFI, it may not have the needed drivers to understand how to talk to a NVMe drive. I ran into this exact issue when trying to boot my Dell R510 from an NVMe drive.\nTo boot from NVMe, I would need to use some shim which could be booted by the BIOS which would chain-boot the actual OS on the NVMe.\nAttempt 1 - Clover The first method I attempted to used was the Clover Bootloader. Clover, while primarily used for Hackintoshes, can have NVMe support added and chain boot to another disk. I wanted to try this first as I would prefer an OS-indifferent solution that would continue to work no matter what I installed on the NVMe.\nI attempted to image Clover onto a USB drive and after several wrong attempts, I finally formatted the USB as fat32 and just copy/pasted the contents to the drive. 
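For anyone following along, the formatting step that finally worked was roughly the following (a sketch only; the device name, partition, and unpacked Clover directory are placeholders for whatever your system uses, and this will wipe the stick):

```bash
# Assumes the USB stick shows up as /dev/sdX with a single partition /dev/sdX1
# and that the Clover release has been unzipped into ./clover-release/.
sudo mkfs.vfat -F 32 -n CLOVER /dev/sdX1    # FAT32 so the old BIOS can read it
sudo mount /dev/sdX1 /mnt
sudo cp -r clover-release/EFI /mnt/         # copy the EFI/ tree from the Clover release onto the stick
sudo umount /mnt
```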
I then followed instructions I found to enable NVMe compatibility by copying NvmExpressDxe.efi from EFI/CLOVER/drivers/off into EFI/CLOVER/drivers/BIOS/ and EFI/CLOVER/drivers/UEFI/. I then modified the EFI/CLOVER/config.plist file to automatically boot the the NVMe drive after a 5 second pause.\nHowever, I could never get Clover to read this config.plist file. I tried placing it in other paths that were suggested by comments on the internet. I tried reverting to the original file and modifying one small value to ensure I had not messed up the file formatting. Still, I could not get Clover to read the config file and automatically boot from the NVMe drive. It would just remain at the boot selection menu where I could manually select the NVMe to boot from which would then work perfectly.\nAttempt 2 - Proxmox Boot Proxmox comes with the proxmox-boot-tool tool which is used to synchronize all the boot disks with the UEFI (ESP) partition. After giving up on Clover, I looked into proxmox-boot-tool and found I could just place an extra ESP partition on the USB drive and let proxmox-boot-tool keep it up-to-date and synced.\nRather than creating the correct partitions in the correct locations and of the right size, I just did a dd if=/dev/\u0026lt;root pool\u0026gt; of=/dev/\u0026lt;usb drive\u0026gt; bs=1M count=1024 to copy over the first 1 GB of the disk. I then used gparted to delete the main partition (leaving the BIO and ESP partitions) and to give the remaining partitions new UUIDs. I then booted into Proxmox and proxmox-boot-tool format /dev/disk/by-uuid/\u0026lt;USB ESP partition UUID\u0026gt; --force and proxmox-boot-tool init /dev/disk/by-uuid/\u0026lt;USB ESP partition UUID\u0026gt;. Once that finished, I rebooted and the USB drive was used as the boot drive which booted into the main Proxmox OS.\nConclusion I\u0026rsquo;ve had this in place for a few months now and it has worked perfectly through several updates to the boot cmdline options and kernel updates.\n","description":"My process of finding the best way to boot Proxmox off an NVMe drive in an old Dell R510","id":4,"section":"posts","tags":["sysadmin","proxmox"],"title":"NVMe Boot in Proxmox on Older BIOS","uri":"https://johnhollowell.com/blog/posts/nvme-proxmox-bios/"},{"content":" This was my first year going to the All Things Open and my first in-person conference in several years.\nOverall, I really enjoyed the conference and would recommend other\u0026rsquo;s attend. It definitely helped that I already live in Raleigh so I didn\u0026rsquo;t have to travel to the conference, but even traveling to the conference would be a good experience.\nVenue The Raleigh conference center is a spacious venue. The paths to the session rooms are wide and easy to access. Most of the session rooms were large enough to fit everyone in the session. The conference center has ample surrounding parking and food options if the catered sandwiches don\u0026rsquo;t cover your appetite. The sponsor/vendor booths were set up in the atrium with plenty of room to interact with the vendors and still have room to walk past. All the areas were clean and tidy and the HVAC worked well in all but the smallest session room when it was packed.\nVendor Booths There were a lot of vendors spread around the whole atrium area. The conference did an interesting optional gamification addition to the conference: the keynote sessions and each vendor booth had a code which when entered into the conference app would add points to your score. 
At the end of each day, the top scorers were randomly drawn for some very nice prizes.\nThere were a lot of really nice vendors present, from large companies like AWS, Microsoft, and Meta to small FOSS organizations like the FSF and OSI. Many vendors had great swag and welcoming representatives to talk to. While most of the companies were definitely focused on selling to enterprise customers, there were many that had personal/community versions of their software available and knowledgeable people to answer technical questions.\nSessions The session subjects covered a wide range, from enterprise-focused tracks to tracks focused on the open source community and collaboration. Some of the sessions were livestreamed for the virtual attendees (and thus recorded) while some were not recorded. I mostly attended the non-recorded sessions since I can watch the recorded sessions later, but all the sessions were well attended.\n","description":"My experience attending All Things Open for the first time","id":5,"section":"posts","tags":["life","ATO"],"title":"All Things Open 2022 Impressions","uri":"https://johnhollowell.com/blog/posts/ato22/"},{"content":" This December I graduated from Clemson University with my Masters degree in Computer Science. There were a lot of things I learned (obviously), but I think the most useful things I learned were not from the course material, but from my outside learning and interactions with peers and professors.\nProject Organization All my courses this semester were project based; while some of the grade for the course came from quizzes or homeworks, over 50% came from a semester-long project. My experience with these projects greatly differed based on the professor\u0026rsquo;s involvement and whether it was a group project.\nProblem Definition One of my main gripes for several of my project assignments was the complete lack of the professor defining what the project should look like. While there was some guidance on the general category of project that was required, there was little to no guidance on what specific topics were in scope. We submitted a project proposal, which would have helped with validating the acceptability of the project; however, the professors rarely commented on the validity of the proposal, let alone returned a grade for the proposal in a reasonable amount of time (read: before the end of the semester).\nThis is a perfect example of why requirements gathering and client interaction are such an important part of the development lifecycle. Knowing the plan for the project before spending development time ensures it is not wasted on something that is not the desired result. Having strict requirements allows the developer to precisely match the functionality to the desired outcomes.\nDeliverables Another important aspect which was mostly glossed over was deliverables. While each professor did say a deliverable of the project would be a final paper, specifics on the format, length, and content of the paper were lacking or never given. In addition, other deliverables were requested after the initial assignment was created, often at the very end of the semester. While this is not that uncommon in \u0026ldquo;real life,\u0026rdquo; added requirements/deliverables will often push back the project\u0026rsquo;s due date; not so with school projects, which must be done by the end of the semester.\nGroup Work Group work in school is almost always a complete mess. Over the course of my Masters degree, I\u0026rsquo;ve been in some okay groups and a lot of bad groups.
I\u0026rsquo;ve been in groups where someone went completely AWOL for several months and only responded to messages when it was time for them to add their name to the deliverables. I\u0026rsquo;ve also been in some groups that were fantastic, where the team members understood that occasionally someone might have other stuff they needed to prioritize, but by the end of the semester everyone had contributed equally. The best groups recognized the different skills of each member and assigned tasks to the person most capable of completing them.\nGroup work in school is very different from working in teams in industry. In school, your group grade is at best 10% based on your individual contribution. This leads some people to not contribute to the team and just accept 90% as their max grade. At work, if you do not do the tasks assigned to you, no one is going to do your tasks and it is very apparent whose responsibility they are. Getting paid to do the work rather than paying to do the work also drastically changes the motivation and desire to complete the work.\nSelf Learning Most of the courses I took in my Masters program covered information I had learned previously either on my own or on the job. This meant that a large portion of the course material was redundant to me. However, these courses gave me the opportunity to deepen my knowledge of the covered material and utilize the professors as a resource to discover new corollary topics to learn on my own. This gave me the opportunity to learn at my own pace and follow the rabbit trails that I find interesting.\nI have also had courses that I had to teach myself; professors that don\u0026rsquo;t teach or that teach wrong material. For one professor in particular, I had to stop going to class, as listening to her lectures decreased/confused my pre-existing knowledge on the topic.\nLab Teaching Assistantship I had a lot of fun being a Teaching Assistant (TA) for an undergrad lab section this past semester. I got to befriend some really cool students and get a taste of what it takes to teach. As I would like to teach at some point in the future, this was a fantastic opportunity to understand some of the requirements of teaching, experience the \u0026ldquo;joy\u0026rdquo; of grading, and deal with students\u0026rsquo; questions and concerns.\n","description":"This December I graduated from Clemson University with my Masters degree in Computer Science. There were a lot of things I learned (obviously), but I think the most useful things I learned were not from the course material, but from my outside learning and interactions with peers and professors.","id":6,"section":"posts","tags":["clemson"],"title":"Masters Degree Takeaways","uri":"https://johnhollowell.com/blog/posts/masters-degree-takeaways/"},{"content":" ZFS is a great filesystem that I use on most of my systems and it makes full-drive backups a breeze when I am refreshing hardware in my homelab. However, sometimes I want to back up to a non-ZFS system. These are the steps I use for fast and verified backups to a file on another computer.\nTL;DR: Combine the power of ZFS, zStandard, pv, and netcat to have a fast backup of a ZFS snapshot with verbose metrics of the process and progress.\nBackground If you already know about ZFS, snapshots, replication, and zStandard, feel free to skip this section. ZFS is a next-generation filesystem which supports a lot of great usability, data integrity, and performance features.\nOne of the most useful features is snapshots.
Since ZFS is a copy-on-write (COW) filesystem, it can make a \u0026ldquo;copy\u0026rdquo; of an entire filesystem instantly as it just stores the current state and keeps blocks of data even if they later get updated/deleted. This is incredibly useful for backing up a system, as you can make a snapshot of the system instantly while it is running and then take the time to transfer the data.\nZFS can take a snapshot and zfs send the data in a stream that can be piped to a file, other commands, or a zfs receive on another host to load the datasets to that host\u0026rsquo;s storage and make the files available on the live filesystem. Receiving to another system has many benefits, but one major problem is the destination requires a ZFS pool mounted that has enough unused storage to receive all the incoming data. Sometimes this is not feasible, or even if the destination has a working pool it is not desired to mix in another filesystem with the existing data. In this case, sending to a file will store the entire send stream that can later be cat\u0026rsquo;d back to a zfs receive whenever desired.\nOne other tool used in this guide is zStandard. This is a newer compression library with great compression ratios while maintaining fairly high compression speed and incredibly fast decompression speed. I love zStandard and try to use it in everything. It has also had a large adoption increase in the last year or so with many other projects including zStandard compression support (ZFS, btrfs, tor, and Rsync to name a few).\nSetup There are two hosts: one using ZFS which will be backed up (src.example.com), and one host which will store the backup (dest.example.com). This destination host only needs enough storage space to store the (compressed) send stream.\nAll code is run on src.example.com unless otherwise noted. Making a Snapshot ZFS send streams only work on snapshots, so we need to create a snapshot of the current files and data to be able to send it. If you already have a up-to-date snapshot (maybe from automation), you can just uses that snapshot.\nTo create a snapshot, you either need to be root (run the following command with sudo), or have the snapshot ZFS permissions on the dataset. As we will be creating a recursive snapshot of all datasets, it is easier to just run commands as root.\nThe format of the snapshot command is\nzfs snap[shot] pool/datasetA/subdataset/thing1@snapshot-name.\nTo snapshot the \u0026ldquo;testing\u0026rdquo; dataset on my \u0026ldquo;tank\u0026rdquo; pool with the snapshot name \u0026ldquo;backup_2021-01-02_0304\u0026rdquo;, I would use either command\n1 2 zfs snap tank/testing@backup_2021-01-02_0304 zfs snapshot tank/testing@backup_2021-01-02_0304 To backup an entire pool, use zfs snap -r tank@full_backup which will recursively (-r) snapshot the given dataset and all datasets below it.\nDetermining the Size of the Send Now that we have our snapshot, it would be nice to know how much data we will be sending and storing for our backup. We can either get a (fairly accurate) estimate of the size of the send (quick) or get the exact size of the send. 
Unless you really need to know the exact size of the send, I recommend the fast method\nFast Size We can get an estimate of the size of a send by running the send with the dry-run flag (-n) in verbose mode (-v).\n1 zfs send -R -n -v tank@full_backup The last line should tell you the estimate of the size of the send.\nSlow Size If you really need the exact size of the send, you can use wc to get the total bytes being sent.\n1 zfs send -R tank@full_backup | wc -c If you want to see the speed that zfs can read the send data off your storage, you can use pv (you might need to install it) to see the size and speed.\n1 zfs send -R tank@full-backup | pv \u0026gt; /dev/null #fullsend Now that everything is prepared, we can actually send the data to the destination. We\u0026rsquo;ll start with the most basic form and add on some extra commands to add speed and metrics of the status of the send.\nIn the following examples, the zfs send command is used with the -R flag. This makes an \u0026ldquo;replication\u0026rdquo; send stream which can fully recreate the given snapshot from nothing. You can omit it if that is not the functionality you need.\n-R, \u0026ndash;replicate\nGenerate a replication stream package, which will replicate the specified file system, and all descendent file systems, up to the named snapshot. When received, all properties, snapshots, descendent file systems, and clones are preserved. 1\nBasic Send Getting bits from A to B is pretty easy. We can use SSH to send the data to the destination host and save it as a file2.\n1 zfs send -R tank@full-backup | ssh dest.example.com \u0026#34;cat \u0026gt; /path/to/saved/file.zfsnap\u0026#34; We can use the size we found earlier to get a rough progress bar. pv can take in the size of the stream and use it to determine an ETA and progress. It can take integer values with units of \u0026ldquo;k\u0026rdquo;, \u0026ldquo;m\u0026rdquo;, \u0026ldquo;g\u0026rdquo;, and \u0026ldquo;t\u0026rdquo;3.\nAssuming we have 24860300556 bytes (23.2GiB), we could use either of the following\n1 2 zfs send -R tank@full-backup | pv -s 24860300556 | ssh dest.example.com \u0026#34;cat \u0026gt; /path/to/saved/file.zfsnap\u0026#34; zfs send -R tank@full-backup | pv -s 24G | ssh dest.example.com \u0026#34;cat \u0026gt; /path/to/saved/file.zfsnap\u0026#34; If you have ZFS installed on the destination, you can check validate the send stream using zstreamdump4.\n1 2 # on dest.example.com cat /path/to/saved/file.zfsnap | zstreamdump While this works and is super reliable, it is inefficient in its data storage size and transport cost. The send stream is uncompressed on your destination and SSH can use significant CPU on low-power devices.\nThe next two solutions seek to solve these problems.\nCompression As long as you are not sending a raw or encrypted snapshot, there will be some amount of compressible data in the send stream. We can compress the send stream so it is (a bit) smaller on the destination\u0026rsquo;s storage.\nYou can compress on either the source or the destination, however compressing on the source means less data is transmitted over the network which usually is slower than the CPU needed for compression.\nWe\u0026rsquo;ll use zStandard due to its speed, compression ratio, and adaptable compression level.\nBasic Usage\n1 zfs send -R tank@full-backup | zstd -c | ssh dest.example.com \u0026#34;cat \u0026gt; /path/to/saved/file.zfsnap\u0026#34; ZStandard can also use an adaptive compression level. 
This means that if the network is slow and the compressor would otherwise be idle, it can increase the compression level and can also reduce the level if the network speeds up. This does mean that it can be a low compression ratio, but if reduced storage space is desired, the stream can be recompressed (e.g. zstd -d /path/to/saved/file.zfsnap.zst | zstd -T0 -19 /path/to/saved/file_smaller.zfsnap.zst). The minimum and maximum levels for the adaption can be set, but using just --adapt defaults to sane defaults (3 to 15).\nIt can also use multiple threads to fully utilize all the cores in the host. The number of threads can be specified or set to 0 to use the same number of threads as cores (-T0)5. It has a verbose mode (-v) as well which gives insight to the compression level and compression ratio of the stream.\n1 zfs send -R tank@full-backup | zstd -c -v -T0 --adapt=min=1,max=19 | ssh dest.example.com \u0026#34;cat \u0026gt; /path/to/saved/file.zfsnap.zst\u0026#34; pv can also be used to give progress and speed calculations (however, it seems that the verbose output of zstd conflicts with pv):\n1 zfs send -R tank@full-backup | pv -cN raw -s 24G | zstd -c -T0 --adapt=min=1,max=19 | pv -cN compressed | ssh dest.example.com \u0026#34;cat \u0026gt; /path/to/saved/file.zfsnap.zst\u0026#34; Local Send Only use the following across a network you trust (not the internet). This method sends data unencrypted. SSH takes a lot of processing power to encrypt data when sending large amounts of data through it. If we are on a secure network where we can sacrifice encryption for speed, we can use netcat instead of ssh.\nHowever, there is not server on the destination (unlike the SSH daemon), so we need to start a netcat server on the destination to listen (-l) for connections on a port (12345) and have it redirecting to the destination file (with pv showing us stats on the receiving side).\n1 2 # on dest.example.com nc -l 12345 | pv \u0026gt; /path/to/saved/file.zfsnap Now we can send it data to save to the file\n1 zfs send -R tank@full-backup | pv -s 24G | nc dest.example.com 12345 Putting it all together 1 2 # on dest.example.com nc -l 12345 | pv \u0026gt; /path/to/saved/file.zfsnap.zst 1 2 3 4 5 6 # on src.example.com snapName=\u0026#39;tank@full-backup\u0026#39; zfs snap -r ${snapName} sendSize=$(zfs send -v --dryrun -R ${snapName} | grep \u0026#34;total estimated\u0026#34; | sed -r \u0026#39;s@total estimated size is ([0-9\\.]+)(.).*@\\1\\n\\2@\u0026#39; | xargs printf \u0026#34;%.0f%s\u0026#34;) zfs send -R ${snapName} | pv -cN raw -s ${sendSize} | zstd -c -T0 --adapt=min=1,max=19 | pv -cN compressed | nc dest.example.com 12345 https://openzfs.github.io/openzfs-docs/man/8/zfs-send.8.html\u0026#160;\u0026#x21a9;\u0026#xfe0e;\nAs far as I know, the .zfsnap is not an official or commonly used extension. However, it helps me know what the file is, so I\u0026rsquo;ve used it here. Use whatever file name and extension you want.\u0026#160;\u0026#x21a9;\u0026#xfe0e;\nhttps://linux.die.net/man/1/pv\u0026#160;\u0026#x21a9;\u0026#xfe0e;\nhttps://linux.die.net/man/8/zstreamdump\u0026#160;\u0026#x21a9;\u0026#xfe0e;\nThe documentation for zStandard notes that using the -T flag with --adapt can cause the level to get stuck low. If you have problems with the compression level getting stuck at a low value, try removing the threads flag.\u0026#160;\u0026#x21a9;\u0026#xfe0e;\n","description":"ZFS is a great filesystem which I use on most of my systems and it makes full-drive backups a breeze. 
However, sometimes I want to backup to a non-ZFS system. These are the steps I use for fast and verified backups to a file on another computer.","id":7,"section":"posts","tags":["ZFS","backup","sysadmin"],"title":"ZFS Backups to Files","uri":"https://johnhollowell.com/blog/posts/zfs-backups-to-files/"},{"content":"I first noticed Kernel Same-page Merging (KSM) while working with Virtual Machines (VMs) under KVM (in Proxmox VE).\nKSM is a way of reducing physical memory usage by using one physical page of memory for all duplicate copied of that page. It does this by periodically scanning through memory, finding duplicate pages, and de-duplicating them via virtual memory. It is an extension of how the kernel shares pages between fork()\u0026lsquo;ed processes and uses many of the same methods of sharing memory. KSM is most often used with virtualization to de-duplicate memory used by guest Operating Systems (OSs), but can be used for any page of memory which the program registers with KSM to scan. \u0026ldquo;Red Hat found that thanks to KSM, KVM can run as many as 52 Windows XP VMs with 1 GB of RAM each on a server with just 16 GB of RAM.\u0026rdquo;1\nVirtual Memory Background To fully understand how KSM works, a (at least) basic understanding of how virtual memory work is required.\nTo prevent programs from having to know where every other process on the computer is using memory, the kernel (the all-powerful dictator of the OS) tells each process it has memory starting at address 0. It then keeps a record of where in actual (physical) memory each block (page) or the virtual memory is located.\nIt uses this mapping to translate memory addresses each time the process reads or writes to memory.\n© Computer History Museum This virtual memory also allows things like memory-mapped files on disk and Copy-On-Write (COW) pages. When a process clones (forks) itself, it doesn\u0026rsquo;t have to make a copy of all the memory it was using. It simply marks each page as COW. Each process can read from their memory with both virtual addresses pointing to the same physical page (now marked COW), but when either attempts to write to memory, the existing physical page is left inn place (so the other process can still use it) and a new physical page is allocated and mapped to the writer\u0026rsquo;s virtual memory. This allows pages of memory that are not changed in forked processes to use no additional memory.\nthe same process is used by KSM: it finds duplicate pages in the memory ranges registered with it, marks one of the physical pages as COW, and frees the other physical pages after mapping all the virtual pages to the one physical page.\nhttps://kernelnewbies.org/Linux_2_6_32#Kernel_Samepage_Merging_.28memory_deduplication.29\u0026#160;\u0026#x21a9;\u0026#xfe0e;\n","description":"Today I Learned about Kernel Same-page Merging (KSM)","id":8,"section":"posts","tags":["Linux","memory"],"title":"TIL: Kernel Same-page Merging (KSM)","uri":"https://johnhollowell.com/blog/posts/til-ksm/"},{"content":" Please read A Trickle Is Better Than Nothing before reading this post. I just got over having no internet at my apartment for over a week. I was gone a portion of the week, but it was still very inconvenient. Working remotely doesn\u0026rsquo;t help as to get paid I need to have an internet connection (but not necessarily a fast connection).\nWorking Around It While I could have use cellular data to carry me through, I had already used a significant portion of my data cap on various travels this summer. 
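(Circling back to the KSM post above: on a Linux host you can poke at KSM directly through sysfs; programs opt their memory in with madvise(MADV_MERGEABLE), and the counters below show what the scanner has merged. The sysfs paths are the standard kernel interface; the ksmtuned service is an assumption about your distro.)

```bash
# Is KSM running? 0 = stopped, 1 = running, 2 = stop and unmerge everything
cat /sys/kernel/mm/ksm/run
# Merge statistics: pages_shared (unique merged pages kept), pages_sharing (mappings deduplicated into them), etc.
grep . /sys/kernel/mm/ksm/pages_*
# Proxmox VE and some other distros run ksmtuned to adjust scanning under memory pressure
systemctl status ksmtuned
```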
I ended up just going onto campus and working from my laptop in a computer lab.\nWhile on campus (with its wonderful gigabit symmetrical internet), I downloaded some videos from my YouTube Watch Later playlist so I could have some videos to watch at home. I tried to do as much pre-downloading of content I could so I would have it accessible at home.\nMissing the Trickle So I had everything downloaded and I was fine, right? Wrong.\nI do more with my life than just watching YouTube. I play games, I browse social media, and (most frustratingly in this situation) I code. It is impossible to stay up-to-date on PRs and Issues without being able to connect to the internet. While I could have looked at the GutHub website on my phone, I have a lot of nice tooling around Issues/PRs that is on my desktop.\nI also wanted to open some PRs on some FOSS projects I want to improve. I couldn\u0026rsquo;t do a git clone, I couldn\u0026rsquo;t download the devcontainers needed for the new project and language, I couldn\u0026rsquo;t easily research how to do what I wanted in the documentation on StackOverflow. This stopped me dead in my tracks and forced me to either make a trip back to campus to get internet or use the limited cellular data I had left to clone the entire repo and pull all the require container layers.\nWhat If How could it have been if I had at least a small amount of internet? I would still utilize the high-speed connection at campus to download some content to watch, but I would have still been able to pull up the YT page for the video to see comments and the description and to comment and like myself. While it would have taken a while, I could have left the repo and containers to download while I was watching something or making dinner or overnight. I could have refreshed my Issues/PRs and get any updates on their status and checks. I could have seen that a new video was released by my favorite channel and either queue the video to download or go somewhere with internet to quickly download it.\nOverall, I am very grateful for the internet I have. This just makes me appreciate the internet all the more with its redundancy and high availability and goes to prove that the last mile is really the most vulnerable segment of any network or connection.\n","description":"I just got over having no internet at my apartment for over a week, and I can confirm that a trickle is better than nothing.","id":9,"section":"posts","tags":["web"],"title":"Nothing Is Definitely Worse Than a Trickle","uri":"https://johnhollowell.com/blog/posts/nothing-is-definitely-worse-than-a-trickle/"},{"content":" Setting up and maintaining a development environment is hard, especially when you need to destructively test features or libraries. Especially for contributing to a new project, you don\u0026rsquo;t know everything that is needed. Sometimes the install/development instructions assume some base tools or packages that are not included in your development environment of choice.\nIn come devcontainers. Rather than having to search through the README for a project you are wanting to contribute to, installing several packages onto your machine, and troubleshooting when it doesn\u0026rsquo;t work, you can simply open the repository as a devcontainer and you are ready to start contributing. Have a project that requires several separate services (databases, middleware/api server, etc.)? 
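(A devcontainer is driven by a small devcontainer.json checked into the repository. Below is a minimal single-container sketch; the name, base image, and postCreateCommand are illustrative assumptions, not this blog's actual configuration.)
mkdir -p .devcontainer
cat > .devcontainer/devcontainer.json << 'EOF'
{
  "name": "example-project",
  "image": "mcr.microsoft.com/devcontainers/base:ubuntu",
  "postCreateCommand": "git --version"
}
EOF
For the multi-service case, the image key gives way to dockerComposeFile and service keys.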
Create a devcontainer using docker-compose and your development environment can launch an entire suit of containers exactly how you need them.\nSetup Install Docker To be able to use containers, we need a container manager: Docker.\nTo get Docker installed, simply follow their instructions\nInstall VS Code To get Visual Studio Code (VS Code) installed, simply follow their instructions\nAdd container remote extension Within VS Code, install the Remote - Containers extension\nClick the Extensions sidebar (or use the \u0026ldquo;Ctrl + Shift + X\u0026rdquo; shortcut) Search for ms-vscode-remote.remote-containers Click \u0026ldquo;Install\u0026rdquo; Test It Out Now that you are ready to use a devcontainer, it is time to test it out!\nYou can grab this blog and use it as the devcontainer to play with. Click on the bottom left in VS Code on the green arrows, find the Container remote section, and select \u0026ldquo;Clone Repository in Container Volume\u0026hellip;\u0026rdquo;, enter https://github.com/jhollowe/blog and hit enter.\nAfter a minute or so of downloading and building your development container, VS Code will be fully functional. You can use the included tasks (Terminal \u0026gt; Run Task\u0026hellip; \u0026gt; Serve) to build and serve the blog. The devcontainer includes everything needed to build the blog and run VS Code. VS Code will even pull in common configuration for tools like Git and SSH.\nModes There are several \u0026ldquo;modes\u0026rdquo; of how to store your files in which you can use devcontainers, each with its own benefits and drawbacks.\n\u0026ldquo;mode\u0026rdquo; Pros Cons container volume * fast\n* fully self-contained environment * hard to access files from outside container mounting a directory * easy to get files in and out\n* allows stateful local files * slow file I/O\n* add/edits/deletes affect the source directory cloning a directory * as fast as a container volume\n* easy to get files into container\n* edits/deletes do not affect the source directory * hard to get files out of container ","description":"Setting up and maintaining a development environment is hard, especially when you need to destructively test features or libraries.","id":10,"section":"posts","tags":["development","containers"],"title":"Getting Started With Devcontainers","uri":"https://johnhollowell.com/blog/posts/getting-started-with-devcontainers/"},{"content":"For environments with complex Active Directory (AD) environments, AD forests can allow flexibility in management and organization of objects.\nBasically, an AD forest allows multiple domains and trees of domains (subdomains) to access and have a shared configuration while still having separate domains with separate host servers.\nThey allow domains to trust and access each other while still maintain separations and boarders. 
I\u0026rsquo;ve seen this used to allow corporate and client domains to communicate or to have a development domain tree that trust and can cross-talk with the production domain tree while still being separate (this is less common as dev domains are usually just subdomains within the production tree).\nResources\nhttps://en.wikipedia.org/wiki/Active_Directory#Forests,_trees,_and_domains https://ipwithease.com/what-is-a-forest-in-active-directory/ https://www.varonis.com/blog/active-directory-forest/ ","description":"Today I Learned about Active Directory Forests","id":11,"section":"posts","tags":["Active Directory"],"title":"TIL: AD Forests","uri":"https://johnhollowell.com/blog/posts/til-ad-forests/"},{"content":" Changing a user\u0026rsquo;s username on Linux requires no processes be running under that user. This makes sense, but what if we only have that user accessible through a SSh connection? What if we don\u0026rsquo;t want to allow external access to the root account? What if the root account doesn\u0026rsquo;t have a password?\nBackground I was recently spinning up a bunch of Raspberry Pis running Ubuntu 20.04 and some VPSes also running Ubuntu 20.04. I wanted to change the username on these nodes, but only really had access to the ubuntu (sudo) account. While I know I could use a cloud-init file to create a user exactly how I want (more on that in a future post), I didn\u0026rsquo;t want to re-flash the nodes and was not able to add a cloud-init file before boot on the VPSes.\nThe Process Getting The Commands To Run So we can\u0026rsquo;t change the username of a user with running processes, but a SSH session and a bash shell both run under my user whenever I\u0026rsquo;m connected.\nThe main problem is executing a command from a user (and sudo-ing to root) while not having that user have a process running.\nUsing either of the commands below allows a command to be run as the root user which will continue running\n1 2 3 4 5 # interactive shell sudo tmux # non-interactive command sudo -s -- sh -c \u0026#34;nohup \u0026lt;command\u0026gt; \u0026amp;\u0026#34; Now that we can have a command running as root independent of the initiating user, we need to kill everything of the user so we can run usermod commands without difficulty. We kill the processes and wait a couple seconds for them all to terminate. Then we can run whatever commands we need.\n1 ps -o pid= -u \u0026lt;current_username\u0026gt; | xargs kill \u0026amp;\u0026amp; sleep 2 \u0026amp;\u0026amp; \u0026lt;command\u0026gt; What This Command Does ps lists the processes running on the system -o pid= selects only the process ID (pid) and does not create a header for the column (=) -u \u0026lt;username\u0026gt; selects only the processes running under \u0026lt;username\u0026gt; | takes the output of the previous command (ps) and makes it the input of the following command (xargs) xargs takes a line separated list (can change the separator) and turns them into arguments for the following command (-r tells it to do nothing if its input is empty) kill takes a pid (or list of pids) and terminates the process. While kill can send different signals to processes, this uses the default signal (TERM). 
\u0026amp;\u0026amp; runs the following command if the preceding command exited successfully (exit code 0) sleep 2 wait 2 seconds for the killed processes to terminate Now, we can get to actually changing the username!\nChanging The Username Now that we can run commands as root without our user running processes, we can proceed to change the username and other related tasks.\nThese commands assume you are running as root. If not, you may need to insert some sudo\u0026rsquo;s as necessary\n1 2 3 4 5 6 7 8 9 10 11 # change the user\u0026#39;s username usermod -l \u0026lt;new_username\u0026gt; \u0026lt;current_username\u0026gt; # move the user\u0026#39;s home directory usermod -d /home/\u0026lt;new_username\u0026gt; -m \u0026lt;new_username\u0026gt; # change user\u0026#39;s group name groupmod -n \u0026lt;new_username\u0026gt; \u0026lt;current_username\u0026gt; # replace username in all sudoers files (DANGER!) sed -i.bak \u0026#39;s/\u0026lt;current_username\u0026gt;/\u0026lt;new_username\u0026gt;/g\u0026#39; /etc/sudoers for f in /etc/sudoers.d/*; do sed -i.bak \u0026#39;s/\u0026lt;current_username\u0026gt;/\u0026lt;new_username\u0026gt;/g\u0026#39; $f done Putting it all together When we put it all together (with some supporting script), we get change-username.sh as seen below:\n1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 #!/bin/bash currentUser=$1 newUser=$2 if [ $# -lt 2 ]; then printf \u0026#34;Usage:\\n\\t$0 \u0026lt;current_username\u0026gt; \u0026lt;new_username\u0026gt; [new_home_dir_path]\\n\u0026#34; exit 1 fi if [ $(id -u) -ne 0 ];then echo \u0026#34;Root permission needed for modifying users. 
Can not continue.\u0026#34; exit 2 fi newHome=\u0026#34;/home/$newUser\u0026#34; if [ $# == 3 ];then newHome=$3 fi echo \u0026#34;Changing $currentUser to $newUser\u0026#34; echo echo \u0026#34;Running this script has the possibility to break sudo (sudoers file(s)) and WILL kill all processes owned by $currentUser\u0026#34; echo \u0026#34;$currentUser will be logged out and will need to reconnect as $newUser\u0026#34; read -n1 -s -r -p $\u0026#39;Continue [Y/n]?\\n\u0026#39; key if [ $key != \u0026#39;\u0026#39; -a $key != \u0026#39;y\u0026#39; -a $key != \u0026#39;Y\u0026#39; ]; then echo \u0026#34;Stopping; no files changed\u0026#34; exit 2 fi # put the main script in /tmp so the user\u0026#39;s home directory can be safely moved tmpFile=$(mktemp) cat \u0026gt; $tmpFile \u0026lt;\u0026lt; EOF #!/bin/bash shopt -s extglob # terminate (nicely) any process owned by $currentUser ps -o pid= -u $currentUser | xargs -r kill # wait for all processes to terminate sleep 2 # forcibly kill any processes that have not already terminated ps -o pid= -u $currentUser | xargs -r kill -s KILL # change the user\u0026#39;s username usermod -l \u0026#34;$newUser\u0026#34; \u0026#34;$currentUser\u0026#34; # move the user\u0026#39;s home directory usermod -d \u0026#34;$newHome\u0026#34; -m \u0026#34;$newUser\u0026#34; # change user\u0026#39;s group name groupmod -n \u0026#34;$newUser\u0026#34; \u0026#34;$currentUser\u0026#34; # replace username in all sudoers files sed -i.bak \u0026#39;s/\u0026#39;$currentUser\u0026#39;/\u0026#39;$newUser\u0026#39;/g\u0026#39; /etc/sudoers for f in /etc/sudoers.d/!(*.bak); do echo \u0026#34;editing \u0026#39;\\$f\u0026#39;\u0026#34; sed -i.bak \u0026#39;s/\u0026#39;$currentUser\u0026#39;/\u0026#39;$newUser\u0026#39;/g\u0026#39; \\$f # TODO fix $f not getting the file path for some reason done EOF echo \u0026#34;Putting script into $tmpFile and running\u0026#34; chmod 777 $tmpFile sudo -s -- bash -c \u0026#34;nohup $tmpFile \u0026gt;/dev/null \u0026amp;\u0026#34; ``` \u0026lt;!-- markdownlint-disable-file --\u0026gt; requirements Command(s) Package bash bash ps, kill procps usermod, groupmod passwd sed sed xargs findutils ","description":"Changing a user's username on Linux requires no processes be running under that user. This makes sense, but what if we only have that user accessible through a SSh connection?","id":12,"section":"posts","tags":["sysadmin"],"title":"Change Username Without Separate Session","uri":"https://johnhollowell.com/blog/posts/change-username-without-separate-session/"},{"content":"One of the most important parts of a working cluster is the interconnection and communication between nodes. While the networking side will not be covered now, a very important aspect will be: passwordless SSH.\nInter-node SSH The first task to getting easy access between nodes is ensuring SSH access between all the nodes.\nWhile not necessary, I recommend adding all your nodes to the /etc/hosts file on each node. 
For example, the /etc/hosts file might look like\n1 2 3 4 5 6 7 8 9 127.0.0.1 localhost # The following lines are desirable for IPv6 capable hosts ::1 ip6-localhost ip6-loopback fe00::0 ip6-localnet ff00::0 ip6-mcastprefix ff02::1 ip6-allnodes ff02::2 ip6-allrouters ff02::3 ip6-allhosts to which I would add (using the actual IPs of the nodes)\n1 2 3 4 192.168.0.11 node01 192.168.0.12 node02 192.168.0.13 node03 192.168.0.14 node04 Automate adding to your hosts files 1 2 3 4 5 6 7 8 9 for node in localhost node02 node03 node04; do ssh $node \u0026#34;cat | sudo tee -a /etc/hosts \u0026gt; /dev/null\u0026#34; \u0026lt;\u0026lt; EOF 192.168.0.11 node01 192.168.0.12 node02 192.168.0.13 node03 192.168.0.14 node04 EOF done After this is added to your hosts file on all your nodes, from any node you should be able to ssh node1 from any of them successfully after entering your password.\nNOTE: if you have not configured static IP addresses for your nodes, any changes to their IPs will require you changing the hosts file on all your nodes. Passwordless SSH To be able to SSH between nodes without the need for a password, you will need to create an SSH key. This will allow SSH to work in scripts and tools (MPI) without needing user interaction.\nFirst, we need to create a key. There are multiple standards of encryption you can use for SSH keys. The default is RSA, but it is generally considered to be less secure than modern standards. Therefore, these instructions will show how to create a ed25519 key. This will work on your cluster, but some (very) old systems may not support ED25519 keys (RSA keys will generally work everywhere even though they are less secure).\nTo create a key, use this command on one of your nodes:\n1 ssh-keygen -t ed25519 -a 100 -f ~/.ssh/id_ed25519 -C \u0026#34;Inter-node cluster ssh\u0026#34; This article does a good job of breaking down what all the arguments are used for.\nNext, we need our nodes to trust the key we just created. We\u0026rsquo;ll start with getting the current node to trust the key.\n1 ssh-copy-id -i ~/.ssh/id_ed25519 localhost NOTE: If you have already setup NFS with a shared home directory, you don\u0026rsquo;t need to do anything further; the key is accessible and trusted on all the nodes. Now we can just copy these files to all the other nodes so that they can use and will trust this key.\n1 2 3 4 5 for node in node02 node03 node04; do # list all the nodes that should get the key ssh-copy-id -i ~/.ssh/id_ed25519 $node # you will need to enter your password for this step scp ~/.ssh/id_ed25519 $node:.ssh/ ssh $node \u0026#34;chmod 600 ~/.ssh/id_ed25519\u0026#34; # ensure the key is locked down so SSH will accept it. done And to make all the nodes trust each other\u0026rsquo;s fingerprints\n1 2 3 for node in node02 node03 node04; do scp ~/.ssh/known_hosts $node:.ssh/ done We can check that we can SSH into all the nodes without having to enter a password:\n1 2 for node in node2 node3 node4; do ssh $node \u0026#34;hostname\u0026#34; ","description":"One of the most important parts of a working cluster is the interconnection and communication between nodes. While the networking side will not be covered now, a very important aspect will be: passwordless SSH.","id":13,"section":"posts","tags":["SSH","cluster","networks"],"title":"Cluster SSH","uri":"https://johnhollowell.com/blog/posts/cluster-ssh/"},{"content":" So you want to build a Raspberry Pi cluster.\nThe first thing to do is determine the size of a cluster you want to build. 
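As a follow-up to the passwordless SSH setup above, a quick sketch for confirming that key-based auth is actually being used (BatchMode makes ssh fail instead of quietly falling back to a password prompt; node names follow the node02-node04 convention from that post):
for node in node02 node03 node04; do
  ssh -o BatchMode=yes "$node" hostname || echo "key auth to $node failed"
done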
You can go with any number greater than one, but I\u0026rsquo;ve found that 4-8 is a good sweet spot between too few nodes to get a real feel of cluster operation and too many nodes to manage and maintain. For this and following posts, I will be assuming a cluster of 4 nodes (node01 to node04).\nHardware To run a cluster you also need some supporting hardware, where N is the number of nodes (examples given as links):\nN Raspberry Pi 4 N Micro SD Cards (16GB or more preferred) 1 gigabit ethernet switch (at least N+1 ports) OR router with N LAN ports (see the Networking section below) N short \u0026ldquo;patch\u0026rdquo; ethernet cables Power Supply (choose one) N USB C power supplies N/4 4-port USB power supplies with N USB C cables N/4 BitScope Quattro Raspberry Pi blades and power supply 1 USB Drive [optional] 1 4-slot case (with heatsinks) [optional] 1 power strip [optional] While you can use older models of the Pi if you already have them, using the most recent version will provide the most performance at the same price. Just make sure you get power cables that are compatible with your nodes.\nYou can also use larger RAM versions, but any amount of RAM should work for a minimally functional cluster. The more memory on your nodes, the larger problems they can solve and more performant they can be (caches for network and local storage and a reduction in swappiness).\nPut together the nodes If you got the BitScope Quattro for power or a case for your Pis, you will want to to get your Pis in place. This is also a great time to put on any heatsinks you have for your Pis.\nI would also recommend taking this time to decide the identity of each node and labeling them with a number or other identifier. I\u0026rsquo;ve decided to use numbers to identify my nodes, so I will use a marker or label to indicate which node is which number. This makes troubleshooting easier later on.\nConnect the wires Once your Pis are all ready to go, we need to connect them to power and network. It is useful to connect power and network cables in the order of the Pis so troubleshooting is easier when something goes wrong. Be sure to make sure all the cables are fully inserted.\nNetworking Connections For networking, you can take two paths:\nUse just a switch and connect the cluster to your home network Use a switch and/or a router to create a dedicated sub-network for your cluster. (You can use a switch to connect more nodes to your router if you have run out of ports on it) I\u0026rsquo;ll be doing the second option as it give better separation from my other devices and allows me to set private IP addresses for my nodes regardless the IPs already in use on my home network.\nRegardless the path your choose, you will need to connect your switch or router\u0026rsquo;s WAN port to your home network so your cluster can access the internet and you can access your nodes. (You could also have your cluster completely air-gapped and use static IPs on the nodes, but not being able to download applications and tools is in my opinion not worth the effort).\nSoftware For this cluster I will be using Ubuntu. Canonical ( the company behind Ubuntu) has done a great job of ensuring Ubuntu is stable on Raspberry Pis (with the help of software from the Raspberry Pi Foundation) and has a 64 bit version available (unlike Raspberry Pi OS as of the time of writing). I will be using 20.04, but the latest LTS version should be fine.\nThere is already a great tutorial on how to install Ubuntu on a Raspberry Pi. 
Make sure to select the latest LTS version with 64 bit support. Also, we have no need to install a desktop, so you can skip that step.\nConnecting to the nodes If you followed the above tutorial, you should have the IP address of all your nodes. If you can\u0026rsquo;t tell which IP goes to which node, try unplugging the network cables from all but one node, follow the instructions, and repeat for all the other nodes. If you are using a router for your cluster, make sure you are connected to its network (its WiFi or LAN port) and not your home network as the router will block connections from your home network into your cluster network. (if you want, you can create a port forward on your cluster router for port 22 to your so you can SSH into)\nOnce you know what node is what IP address, connect to the first node (which we will use as our head node). Try running ping 1.1.1.1 to ensure your node can connect to the internet. Then follow the cluster SSH guide to setup SSH between all your nodes.\nStatic IP addresses No matter if you have a dedicated cluster network or it is connected to your home network, you should configure static IP addresses for all your nodes so their addresses will not change accidentally in the future.\nPackages In future posts we will install needed packages for configuring our cluster operation, but below are some useful packages that can help with troubleshooting and analyzing cluster performance.\nDon\u0026rsquo;t forget to sudo apt update to make sure you have the latest package database.\nhtop iftop iotop dstat pv ","description":"The basics of getting a cluster of Raspberry Pis powered on and running. Full cluster configuration in later posts.","id":14,"section":"posts","tags":["cluster","networks","hardware"],"title":"Basic Cluster Setup","uri":"https://johnhollowell.com/blog/posts/basic-cluster-setup/"},{"content":"Clemson\u0026rsquo;s School of Computing (SoC) is the place at Clemson where Computer Science (CPSC), Computer Information Systems (CIS), and Digital Production Arts (DPA) are located. Other computing departments (like Computer Engineering) also use some of the SoC\u0026rsquo;s systems. Below are some useful tips and tools for quickly getting going in the SoC.\nAccess Servers The access servers are the way you can access all the SoC computers from off-campus (without having to use the VPN). You can SSH into them and then SSH into other computers through access (or anything else you can do through SSH). You can connect to the access servers using ssh \u0026lt;clemson_username\u0026gt;@access.computing.clemson.edu (or just ssh access.computing.clemson.edu if you computer\u0026rsquo;s username matches your Clemson username). When you connect, you will see a list of lab computers that you can then connect to by using their name (e.g. ssh babbage1). You can also use access2.computing.clemson.edu if the main access server is down or overloaded.\nIf you are on campus, you can directly access the lab computers without the need to go through the access server. Simply use ssh \u0026lt;computer_name\u0026gt;.computing.clemson.edu while on campus (or VPN) and you can directly connect to the machine.\nNOTE: There is a limit in place on the number of connections for each user connecting to the access server. I\u0026rsquo;ve found it to be 4 connections. If you need more connections, consider using both access and access2 or using SSH Multiplexing. Files on the lab computers All the lab computers share your home directory. 
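On the SSH Multiplexing note above, a sketch of a client-side ~/.ssh/config that reuses a single connection to the access server and jumps through it to a lab machine (the host aliases and the babbage1 example are illustrative; swap in your own Clemson username):
cat >> ~/.ssh/config << 'EOF'
Host access
    HostName access.computing.clemson.edu
    User <clemson_username>
    ControlMaster auto
    ControlPath ~/.ssh/cm-%r@%h-%p
    ControlPersist 10m

Host babbage1
    HostName babbage1
    User <clemson_username>
    ProxyJump access
EOF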
This means that if you write a file on one computer, you can access it on any other lab computer. This also means your settings for most programs will be the same on all the computers.\nThis also means you can access these files from your own computer as a network drive. Check out these instructions for more information on the subject (use the linux share instructions).\nSSH between computers SSHing between the lab machines can be a bit of a pain when you have to enter your password every time. It also makes it harder to write scripts that use multiple lab computers to work on rendering a project or running some processing. However, if you set up SSH keys on the computers, it allows the lab machines to connect to each other without the need for a password. And since the lab computers share files, once SSH keys are setup on one system, the will work on all the systems.\nThe process of making the keys we will use is fairly straight forward. You can check out more information on what these commands do if you are interested.\n1 2 ssh-keygen -t ed25519 -a 100 -f ~/.ssh/id_ed25519 -C \u0026#34;School of Computing\u0026#34; ssh-copy-id -i ~/.ssh/id_ed25519 localhost This will generate a key for the computers to use, and \u0026ldquo;install\u0026rdquo; it so they will accept connections from that key. Since all the computers have the needed files due to the shared filesystem, all the computers now trust connections from all the other computers.\nSnapshot folder Oh no! You just deleted all the files for your assignment! Not to worry.\nYou home directory (/home/\u0026lt;username\u0026gt;/) on the SoC computers is backed up for just such a problem. Within every folder in your home directory is a hidden folder named .snapshot. It will not appear in any listing of directories, but if you cd into it, you can access all the different backups that are available. You can ls ~/.snapshot/ to see all the different dates that are have backups (there are hourly, daily, and weekly backups). These backup files are read-only, so you will need to copy them back into your home directory to be able to edit them.\nTo access and recover your files, you can either do\n1 2 3 cd ~ cd .snapshot/daily.1234-56-78_0010/path/to/your/files/ cp very-important-file.txt ~/path/to/your/files/ OR\n1 2 3 cd ~/path/to/your/files/ cd .snapshot/daily.1234-56-78_0010 cp very-important-file.txt ~/path/to/your/files/ Teachers\u0026rsquo; Office Hours While is isn\u0026rsquo;t really a technology in the SoC, your teachers are one of best resources to gain knowledge and software development skills. After all, the aren\u0026rsquo;t called teachers for nothing.\nAll teachers are required to have office hours (and so are Teaching Assistants (TAs)). Make use of this time to get to know your teacher, ask questions, and learn more about topics that excite you. It is also a good idea to start projects early (I\u0026rsquo;m not saying I ever did this, but it is what I should have done) so you can ask the teacher questions in office hours before everyone else starts to cram the assignment and office hours get busy.\nYOUR SUGGESTION HERE Is there something you really liked or have often used that you think I should add here or in another post? Get in contact with me and let me know!\n","description":"Clemson's School of Computing can be complicated. 
Here are some tips and tricks to get started quickly and make the most of the resources you have.","id":15,"section":"posts","tags":["clemson"],"title":"Clemson SoC 101","uri":"https://johnhollowell.com/blog/posts/clemson-soc-101/"},{"content":" Clemson University’s computer labs store files across all the computers using network shares. You usually just access these shares on the lab machines, but you can also add the shares on your own computer as a network drive.\nThere are two main shares on campus: the campus share used by all the Windows (and Mac?) lab machines (e.g. in Cooper Library, Martin, etc.) and the School of Computing’s Linux systems. Both systems can be accessed in a similar way, but with different settings.\nTo access these network shares, you must either be on campus internet (WiFi or Ethernet) or have the Clemson VPN installed and activated on your device. See the CCIT guide for VPN access for more information. The following instructions assume you are using a Windows device to access the shares. Using the credentials as below, you can follow a guide for adding network drives on Mac OS X or Linux (Ubuntu)\nSteps Open File Explorer and go to \u0026ldquo;This PC\u0026rdquo;. Click \u0026ldquo;Map Network Drive\u0026rdquo; in the top ribbon. Choose what drive letter you want the share to appear as (it doesn’t matter what you choose for this; I used \u0026ldquo;Z\u0026rdquo; for this example) Linux Share Windows Share Enter \\\\neon.cs.clemson.edu\\home into the \u0026ldquo;folder\u0026rdquo; box. 5. Check both \"Reconnect as sign-in\" and \"Connect using different credentials\" so the network drive will automatically connect and you can use your Clemson credentials (rather than your local device’s username and password). Click \"Finish\". 6. Enter your University username (with @clemson.edu) and password. (You might have to click \"more choices\" in the login window to be able to enter a new username/password.) \u0026lt;img data-src=\u0026quot;https://johnhollowell.com/blog/posts/accessing-your-clemson-network-shares/neon_creds.png\u0026quot; alt=\u0026quot;example login credentials for neon.cs.clemson.edu\u0026quot; data-caption=\u0026quot;\u0026quot; src=\u0026quot;data:image/svg+xml,%0A%3Csvg xmlns='http://www.w3.org/2000/svg' width='50%25' height='' viewBox='0 0 24 24'%3E%3Cpath fill='none' d='M0 0h24v24H0V0z'/%3E%3Cpath fill='%23aaa' d='M19 3H5c-1.1 0-2 .9-2 2v14c0 1.1.9 2 2 2h14c1.1 0 2-.9 2-2V5c0-1.1-.9-2-2-2zm-1 16H6c-.55 0-1-.45-1-1V6c0-.55.45-1 1-1h12c.55 0 1 .45 1 1v12c0 .55-.45 1-1 1zm-4.44-6.19l-2.35 3.02-1.56-1.88c-.2-.25-.58-.24-.78.01l-1.74 2.23c-.26.33-.02.81.39.81h8.98c.41 0 .65-.47.4-.8l-2.55-3.39c-.19-.26-.59-.26-.79 0z'/%3E%3C/svg%3E\u0026quot; class=\u0026quot;lazyload\u0026quot; style=\u0026quot;width:50%;height:;\u0026quot;/\u0026gt; 7. Click \"OK\". Your School of Computing home directory should now appear under the drive letter you chose. NOTE: When adding new files via the network share, they are created with permissions defined by your umask. You can use chmod xxx \u0026lt;file\u0026gt; to change a files permissions to xxx (view a chmod guide for more information on the chmod command) Enter \\\\home.clemson.edu\\\u0026lt;username\u0026gt; where \u0026lt;username\u0026gt; is your university username. 5. Check both \"Reconnect as sign-in\" and \"Connect using different credentials\" so the network drive will automatically connect and you can use your Clemson credentials (rather than your local device’s username and password). Click \"Finish\". 6. 
Enter your University username (without @clemson.edu) and password. (You might have to click \"more choices\" in the login window to be able to enter a new username/password.) \u0026lt;img data-src=\u0026quot;https://johnhollowell.com/blog/posts/accessing-your-clemson-network-shares/home_creds.png\u0026quot; alt=\u0026quot;example login credentials for home.clemson.edu\u0026quot; data-caption=\u0026quot;\u0026quot; src=\u0026quot;data:image/svg+xml,%0A%3Csvg xmlns='http://www.w3.org/2000/svg' width='50%25' height='' viewBox='0 0 24 24'%3E%3Cpath fill='none' d='M0 0h24v24H0V0z'/%3E%3Cpath fill='%23aaa' d='M19 3H5c-1.1 0-2 .9-2 2v14c0 1.1.9 2 2 2h14c1.1 0 2-.9 2-2V5c0-1.1-.9-2-2-2zm-1 16H6c-.55 0-1-.45-1-1V6c0-.55.45-1 1-1h12c.55 0 1 .45 1 1v12c0 .55-.45 1-1 1zm-4.44-6.19l-2.35 3.02-1.56-1.88c-.2-.25-.58-.24-.78.01l-1.74 2.23c-.26.33-.02.81.39.81h8.98c.41 0 .65-.47.4-.8l-2.55-3.39c-.19-.26-.59-.26-.79 0z'/%3E%3C/svg%3E\u0026quot; class=\u0026quot;lazyload\u0026quot; style=\u0026quot;width:50%;height:;\u0026quot;/\u0026gt; 7. Click \"OK\". Your Windows home directory should now appear under the drive letter you chose. You now have access to your files as if they were just another drive in your computer. Do note that these drives will be significantly slower than your actual computer drives due to higher latency and lower bandwidth.\n","description":"Clemson University’s computer labs store files across all the computers using network shares. You usually just access these shares on the lab machines, but you can also add the shares on your own computer as a network drive.","id":16,"section":"posts","tags":["clemson"],"title":"Accessing Your Clemson Network Shares","uri":"https://johnhollowell.com/blog/posts/accessing-your-clemson-network-shares/"},{"content":" Hey There! I\u0026rsquo;m John. I enjoy coding and problem solving. On the side I do some photography and videography work.\nCheck out my main website for more information about me and to get in contact.\n","description":"","id":17,"section":"","tags":null,"title":"About","uri":"https://johnhollowell.com/blog/about/"},{"content":" I\u0026rsquo;m at my extended family\u0026rsquo;s house way out in the middle of nowhere; barely enough cellular connection for an SMS, let alone trying to use any data.\nThey have DSL, but they are so far out that the signal is poor and it also is horrible speed. The fastest I saw while I was there was 700Kbps.\nWhile it is always a shock to go from over 100Mbps to under 1Mbps, I think that we are in an age where low bandwidth is not a show-stopper. Now obviously, downloading large files and games is a lot more tedious, I have found the \u0026ldquo;set everything to download overnight\u0026rdquo; method works quite well.\nI think there are three main reason why you can do more with less bandwidth than ever before.\nCompression and Codecs We have reached the point where processing power is so cheap, most of the time everything else is the limitation. We are glad to spend some power and time compressing data if it means we have more storage space on our devices or use less data. Website analysis tools will now complain if a webserver doesn\u0026rsquo;t compress its responses with at least gzip.\nWe are (slowly) starting to use new video and audio codecs that compress the crap out of the video/audio stream. Many devices are even starting to have highly performant hardware acceleration for these formats so it doesn\u0026rsquo;t even cause high load or power draw on mobile devices. 
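A quick aside on the gzip point above: a one-line sketch for checking whether a given server compresses its responses (example.com is a placeholder URL):
# look for a Content-Encoding header of gzip/br on a normal GET
curl -s -D - -o /dev/null -H 'Accept-Encoding: gzip, br' https://example.com | grep -i '^content-encoding'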
Services like YouTube automatically convert content to many different qualities and have algorithms to pick the best quality that you can support.\nCaches, CDNS, and Apps Every web browser has a cache. Many even have several tiers of cache to give good hit/miss ratios and speed. If you are going to Facebook, you really should only ever need to receive the logo, most styles, and even some content once. This not only helps on slow connections, but even on fast connections an additional resource request can take a (relatively) long time to do an entire TCP and SSL handshake transaction.\nA further performance increase can be gained through websites\u0026rsquo; use of CDNs for their libraries and assets. If you are loading jQuery, FontAwesome, or bootstrap from local, you are doing it wrong. Pulling these assets from a CDN not only reduces the load on your server and the latency of the client accessing the resource, but allows caching these common resource between sites. If you visit a site using version x of the y library and then visit another site that uses the same version of y, you should be able to cache the first request of that resource and reuse it for any subsequent pages in any site. You can only do this if you using a CDN (and the same, but realistically most resources either have their own CDN or use one of the most common CDNs that everyone else uses).\nAdditionally, the use of site-specific apps (while annoying) allows the apps to only pull new content and \u0026ldquo;cache\u0026rdquo; all the resources needed to display the app. This makes it assured that outside of app updates, all most of the app\u0026rsquo;s traffic is the content you want to see (or ads sigh).\nMobile Focused Pages Thanks the the horrible practices of the Cellular Companies, anything that is loaded on a cellular connection needs to be small to not use much data to fit within limited bandwidth and even more limited data caps. While I have a great distaste for the stupidity of Cell carriers, their limitations have forced encouraged developments in efficient compression and transmission of pages (as well as a lot of bad practices in lazy loading and obfuscating in the name of minifying). Mosts sites will load smaller or more compressed assets when they detect they are on mobile platforms.\nCaveats While I did \u0026ldquo;survive\u0026rdquo; on the limited connection, I knew it was coming and was able to prepare a bit for it. I downloaded a couple of additional playlists on Spotify and synced a few episodes of TV to my phone from my Plex. However, I did not even use these additional downloads. I used the podcasts I had previously downloaded and even downloaded an additional episode while there. The ability in most apps to download content makes even a trickle of internet be enough to slowly build up the content you want.\nI have also recently reset my laptop and had to download FFmpeg while there. It took a few minutes, but it didn\u0026rsquo;t fail. I did want to do some complex computing while there, but since most of what I do is on other computers (servers, remote machines, etc) it was incredibly easy to do what I wanted to do through an SSH connection to a datacenter. This is cheating a little bit but really is not out of the ordinary; even on fast internet I would SSH out to do things I didn\u0026rsquo;t want or couldn\u0026rsquo;t do on my device (thanks Windows). 
This not not that different from devices like Chromebooks which almost entirely run remotely and require an internet connection to function (or function with all features).\nThis was also a family gathering, so I didn\u0026rsquo;t spend much time on the internet. I could quickly google the answer to win an argument and that was all I needed.\nConclusion Slow internet is still a pain, but I\u0026rsquo;ve grown to appreciate its limitations and work around them. Several trends in computing and content delivery in recent years have made slow internet more bearable. I won\u0026rsquo;t be giving up my high-speed internet any time soon, but slowing down and disconnecting a bit is a nice change of pace in this time where everything has to happen online.\n","description":"While it is always a shock to go from over 100Mbps to under 1Mbps, I think that we are in an age where low bandwidth is not a show-stopper.","id":18,"section":"posts","tags":["web","life"],"title":"A Trickle Is Better Than Nothing","uri":"https://johnhollowell.com/blog/posts/a-trickle-is-better-than-nothing/"},{"content":"2021. A new year; a new start.\nI\u0026rsquo;ve wanted to start a blog for a while, and I thought I might as well start it on the first of the year. I think I finally have enough things I want to talk about that a blog is worth the effort.\nWhat\u0026rsquo;s in a Name? So why the name \u0026ldquo;/dev/random\u0026rdquo;? Well, I\u0026rsquo;m a geek and this blog will be about anything. I don\u0026rsquo;t want to confine this blog to any one subject (including to just tech) and I want the entirety of the blog to be representative of that. It also give me the opportunity to have a punny subtitle, which I am always appreciative of.\nSo\u0026hellip; Why? This blog is mostly a place for me to put information for my future self and others. Don\u0026rsquo;t expect any deep, rambling prose. I\u0026rsquo;m not a spectacular writer and there are many things in my life that don\u0026rsquo;t merit blogging about. However, I have a very wide range of knowledge which I often will forget by the next time I need to use it. This gives me a way to record my experiences and experiments in a public place to which I can reference others. This blog is also an experiment, how meta is that?\nWhen can I get more of this great content? I would like to at least work on this blog every day. That doesn\u0026rsquo;t mean a new post every month; longer and more detailed posts will take me a bit longer. I might hold a post so a whole series can be release together. I might get bored and never create another post. Who knows?\n","description":"I've wanted to start a blog for a while, and I thought I might as well start it on the first of the year. 
I think I finally have enough things I want to talk about that a blog is worth the effort.","id":19,"section":"posts","tags":null,"title":"And So It Begins","uri":"https://johnhollowell.com/blog/posts/and-so-it-begins/"}] \ No newline at end of file diff --git a/categories/index.xml b/categories/index.xml index e7a407e..8114fbf 100644 --- a/categories/index.xml +++ b/categories/index.xml @@ -1 +1 @@ -Categories on /dev/random: A Bit of Everythinghttps://johnhollowell.com/blog/categories/Recent content in Categories on /dev/random: A Bit of EverythingHugo -- gohugo.ioencontact@johnhollowell.com (John Hollowell)contact@johnhollowell.com (John Hollowell)©2023 John Hollowell, All Rights ReservedSun, 24 Dec 2023 01:32:12 +0000story timehttps://johnhollowell.com/blog/categories/story-time/Sun, 24 Dec 2023 01:32:12 +0000contact@johnhollowell.com (John Hollowell)https://johnhollowell.com/blog/categories/story-time/John Hollowellworkhttps://johnhollowell.com/blog/categories/work/Sun, 24 Dec 2023 01:32:12 +0000contact@johnhollowell.com (John Hollowell)https://johnhollowell.com/blog/categories/work/John Hollowellreviewhttps://johnhollowell.com/blog/categories/review/Mon, 17 Jul 2023 22:55:22 +0000contact@johnhollowell.com (John Hollowell)https://johnhollowell.com/blog/categories/review/John Hollowellconferencehttps://johnhollowell.com/blog/categories/conference/Thu, 24 Nov 2022 16:58:11 +0000contact@johnhollowell.com (John Hollowell)https://johnhollowell.com/blog/categories/conference/John Hollowellguidehttps://johnhollowell.com/blog/categories/guide/Wed, 11 Aug 2021 05:02:34 +0000contact@johnhollowell.com (John Hollowell)https://johnhollowell.com/blog/categories/guide/John Hollowellnetworkshttps://johnhollowell.com/blog/categories/networks/Sat, 26 Jun 2021 17:02:39 +0000contact@johnhollowell.com (John Hollowell)https://johnhollowell.com/blog/categories/networks/John Hollowell101https://johnhollowell.com/blog/categories/101/Fri, 25 Jun 2021 20:06:55 +0000contact@johnhollowell.com (John Hollowell)https://johnhollowell.com/blog/categories/101/John Hollowellclemsonhttps://johnhollowell.com/blog/categories/clemson/Sun, 07 Feb 2021 14:08:51 -0500contact@johnhollowell.com (John Hollowell)https://johnhollowell.com/blog/categories/clemson/John Hollowellmetahttps://johnhollowell.com/blog/categories/meta/Fri, 01 Jan 2021 15:00:25 -0500contact@johnhollowell.com (John Hollowell)https://johnhollowell.com/blog/categories/meta/John Hollowell \ No newline at end of file +Categories on /dev/random: A Bit of Everythinghttps://johnhollowell.com/blog/categories/Recent content in Categories on /dev/random: A Bit of EverythingHugo -- gohugo.ioencontact@johnhollowell.com (John Hollowell)contact@johnhollowell.com (John Hollowell)©2023 John Hollowell, All Rights ReservedSun, 24 Dec 2023 01:32:12 +0000workhttps://johnhollowell.com/blog/categories/work/Sun, 24 Dec 2023 01:32:12 +0000contact@johnhollowell.com (John Hollowell)https://johnhollowell.com/blog/categories/work/John Hollowellreviewhttps://johnhollowell.com/blog/categories/review/Mon, 17 Jul 2023 22:55:22 +0000contact@johnhollowell.com (John Hollowell)https://johnhollowell.com/blog/categories/review/John Hollowellconferencehttps://johnhollowell.com/blog/categories/conference/Thu, 24 Nov 2022 16:58:11 +0000contact@johnhollowell.com (John Hollowell)https://johnhollowell.com/blog/categories/conference/John Hollowelllifehttps://johnhollowell.com/blog/categories/life/Sat, 18 Dec 2021 21:44:42 +0000contact@johnhollowell.com (John 
Hollowell)https://johnhollowell.com/blog/categories/life/John Hollowellopinionhttps://johnhollowell.com/blog/categories/opinion/Sat, 18 Dec 2021 21:44:42 +0000contact@johnhollowell.com (John Hollowell)https://johnhollowell.com/blog/categories/opinion/John Hollowellguidehttps://johnhollowell.com/blog/categories/guide/Wed, 11 Aug 2021 05:02:34 +0000contact@johnhollowell.com (John Hollowell)https://johnhollowell.com/blog/categories/guide/John Hollowellnetworkshttps://johnhollowell.com/blog/categories/networks/Sat, 26 Jun 2021 17:02:39 +0000contact@johnhollowell.com (John Hollowell)https://johnhollowell.com/blog/categories/networks/John Hollowell101https://johnhollowell.com/blog/categories/101/Fri, 25 Jun 2021 20:06:55 +0000contact@johnhollowell.com (John Hollowell)https://johnhollowell.com/blog/categories/101/John Hollowellmetahttps://johnhollowell.com/blog/categories/meta/Fri, 01 Jan 2021 15:00:25 -0500contact@johnhollowell.com (John Hollowell)https://johnhollowell.com/blog/categories/meta/John Hollowell \ No newline at end of file diff --git a/categories/life/index.html b/categories/life/index.html new file mode 100644 index 0000000..4d96e09 --- /dev/null +++ b/categories/life/index.html @@ -0,0 +1,83 @@ +life – /dev/random: A Bit of Everything +
life
Masters Degree Takeaways
Masters Degree Takeaways
· ☕ 4 min read
This December I graduated from Clemson University with my Masters degree in Computer Science. There were a lot of things I learned (obviously), but I think the most useful things I learned were not from the course material, but my outside learning and interactions with peers and professors.


\ No newline at end of file diff --git a/categories/life/index.xml b/categories/life/index.xml new file mode 100644 index 0000000..9f91cd7 --- /dev/null +++ b/categories/life/index.xml @@ -0,0 +1,3 @@ +life on /dev/random: A Bit of Everythinghttps://johnhollowell.com/blog/categories/life/Recent content in life on /dev/random: A Bit of EverythingHugo -- gohugo.ioencontact@johnhollowell.com (John Hollowell)contact@johnhollowell.com (John Hollowell)©2023 John Hollowell, All Rights ReservedSat, 18 Dec 2021 21:44:42 +0000Masters Degree Takeawayshttps://johnhollowell.com/blog/posts/masters-degree-takeaways/Sat, 18 Dec 2021 21:44:42 +0000contact@johnhollowell.com (John Hollowell)Sat, 18 Dec 2021 21:44:42 +0000https://johnhollowell.com/blog/posts/masters-degree-takeaways/This December I graduated from Clemson University with my Masters degree in Computer Science. There were a lot of things I learned (obviously), but I think the most useful things I learned were not from the course material, but my outside learning and interactions with peers and professors. +Project Organization All my courses this semester were project based; while some of the grade for the course came from quizzes or homeworks, over 50% came from a semester long project.John Hollowellfeatured imageclemsonlifeopinionNothing Is Definitely Worse Than a Tricklehttps://johnhollowell.com/blog/posts/nothing-is-definitely-worse-than-a-trickle/Sat, 26 Jun 2021 17:02:39 +0000contact@johnhollowell.com (John Hollowell)Sat, 26 Jun 2021 17:02:39 +0000https://johnhollowell.com/blog/posts/nothing-is-definitely-worse-than-a-trickle/Please read A Trickle Is Better Than Nothing before reading this post. I just got over having no internet at my apartment for over a week. I was gone a portion of the week, but it was still very inconvenient. Working remotely doesn’t help as to get paid I need to have an internet connection (but not necessarily a fast connection). +Working Around It While I could have use cellular data to carry me through, I had already used a significant portion of my data cap on various travels this summer.John Hollowellfeatured imageweblifeopinionnetworks \ No newline at end of file diff --git a/categories/life/page/1/index.html b/categories/life/page/1/index.html new file mode 100644 index 0000000..9f0d131 --- /dev/null +++ b/categories/life/page/1/index.html @@ -0,0 +1,2 @@ +https://johnhollowell.com/blog/categories/life/ + \ No newline at end of file diff --git a/categories/meta/index.html b/categories/meta/index.html index 97c7ba2..9b456f0 100644 --- a/categories/meta/index.html +++ b/categories/meta/index.html @@ -21,6 +21,8 @@ 1 backup 1 +clemson +3 cloud 1 cluster @@ -29,18 +31,18 @@ 1 development 1 +gaming +1 hardware -3 +4 life -6 +4 Linux 1 memory 1 networks 3 -opinion -3 proxmox 1 SSH @@ -53,20 +55,20 @@ 1
Series framework diff --git a/categories/networks/index.xml b/categories/networks/index.xml index fa104dd..24ea6a4 100644 --- a/categories/networks/index.xml +++ b/categories/networks/index.xml @@ -3,4 +3,4 @@ Working Around It While I could have use cellular data to carry me through, I ha Inter-node SSH The first task to getting easy access between nodes is ensuring SSH access between all the nodes. While not necessary, I recommend adding all your nodes to the /etc/hosts file on each node. For example, the /etc/hosts file might look likeJohn Hollowellfeatured imageSSHclusternetworksguidenetworksRaspberry Pi ClusterA Trickle Is Better Than Nothinghttps://johnhollowell.com/blog/posts/a-trickle-is-better-than-nothing/Sat, 02 Jan 2021 21:31:47 -0500contact@johnhollowell.com (John Hollowell)Sat, 02 Jan 2021 21:31:47 -0500https://johnhollowell.com/blog/posts/a-trickle-is-better-than-nothing/I’m at my extended family’s house way out in the middle of nowhere; barely enough cellular connection for an SMS, let alone trying to use any data. They have DSL, but they are so far out that the signal is poor and it also is horrible speed. The fastest I saw while I was there was 700Kbps. -While it is always a shock to go from over 100Mbps to under 1Mbps, I think that we are in an age where low bandwidth is not a show-stopper.John Hollowellweblifeopinionnetworks \ No newline at end of file +While it is always a shock to go from over 100Mbps to under 1Mbps, I think that we are in an age where low bandwidth is not a show-stopper.John Hollowellweblifenetworksopinion \ No newline at end of file diff --git a/categories/clemson/index.html b/categories/opinion/index.html similarity index 85% rename from categories/clemson/index.html rename to categories/opinion/index.html index faa195a..3bb482b 100644 --- a/categories/clemson/index.html +++ b/categories/opinion/index.html @@ -1,5 +1,5 @@ -clemson – /dev/random: A Bit of Everything -
clemson
Accessing Your Clemson Network Shares
· ☕ 3 min read
Clemson University’s computer labs store files across all the computers using network shares. You usually just access these shares on the lab machines, but you can also add the shares on your own computer as a network drive.

\ No newline at end of file +2
\ No newline at end of file diff --git a/categories/opinion/index.xml b/categories/opinion/index.xml new file mode 100644 index 0000000..cba64bc --- /dev/null +++ b/categories/opinion/index.xml @@ -0,0 +1,5 @@ +opinion on /dev/random: A Bit of Everythinghttps://johnhollowell.com/blog/categories/opinion/Recent content in opinion on /dev/random: A Bit of EverythingHugo -- gohugo.ioencontact@johnhollowell.com (John Hollowell)contact@johnhollowell.com (John Hollowell)©2023 John Hollowell, All Rights ReservedSat, 18 Dec 2021 21:44:42 +0000Masters Degree Takeawayshttps://johnhollowell.com/blog/posts/masters-degree-takeaways/Sat, 18 Dec 2021 21:44:42 +0000contact@johnhollowell.com (John Hollowell)Sat, 18 Dec 2021 21:44:42 +0000https://johnhollowell.com/blog/posts/masters-degree-takeaways/This December I graduated from Clemson University with my Masters degree in Computer Science. There were a lot of things I learned (obviously), but I think the most useful things I learned were not from the course material, but my outside learning and interactions with peers and professors. +Project Organization All my courses this semester were project based; while some of the grade for the course came from quizzes or homeworks, over 50% came from a semester long project.John Hollowellfeatured imageclemsonlifeopinionNothing Is Definitely Worse Than a Tricklehttps://johnhollowell.com/blog/posts/nothing-is-definitely-worse-than-a-trickle/Sat, 26 Jun 2021 17:02:39 +0000contact@johnhollowell.com (John Hollowell)Sat, 26 Jun 2021 17:02:39 +0000https://johnhollowell.com/blog/posts/nothing-is-definitely-worse-than-a-trickle/Please read A Trickle Is Better Than Nothing before reading this post. I just got over having no internet at my apartment for over a week. I was gone a portion of the week, but it was still very inconvenient. Working remotely doesn’t help as to get paid I need to have an internet connection (but not necessarily a fast connection). +Working Around It While I could have use cellular data to carry me through, I had already used a significant portion of my data cap on various travels this summer.John Hollowellfeatured imageweblifeopinionnetworksA Trickle Is Better Than Nothinghttps://johnhollowell.com/blog/posts/a-trickle-is-better-than-nothing/Sat, 02 Jan 2021 21:31:47 -0500contact@johnhollowell.com (John Hollowell)Sat, 02 Jan 2021 21:31:47 -0500https://johnhollowell.com/blog/posts/a-trickle-is-better-than-nothing/I’m at my extended family’s house way out in the middle of nowhere; barely enough cellular connection for an SMS, let alone trying to use any data. +They have DSL, but they are so far out that the signal is poor and it also is horrible speed. The fastest I saw while I was there was 700Kbps. 
+While it is always a shock to go from over 100Mbps to under 1Mbps, I think that we are in an age where low bandwidth is not a show-stopper.John Hollowellweblifenetworksopinion \ No newline at end of file diff --git a/categories/clemson/page/1/index.html b/categories/opinion/page/1/index.html similarity index 55% rename from categories/clemson/page/1/index.html rename to categories/opinion/page/1/index.html index fddb2e7..1664654 100644 --- a/categories/clemson/page/1/index.html +++ b/categories/opinion/page/1/index.html @@ -1,2 +1,2 @@ -https://johnhollowell.com/blog/categories/clemson/ - \ No newline at end of file +https://johnhollowell.com/blog/categories/opinion/ + \ No newline at end of file diff --git a/categories/review/index.html b/categories/review/index.html index 930b3a0..d694aa6 100644 --- a/categories/review/index.html +++ b/categories/review/index.html @@ -21,6 +21,8 @@ 1 backup 1 +clemson +3 cloud 1 cluster @@ -29,18 +31,18 @@ 1 development 1 +gaming +1 hardware -3 +4 life -6 +4 Linux 1 memory 1 networks 3 -opinion -3 proxmox 1 SSH @@ -53,20 +55,20 @@ 1
Categories 101 2 -clemson -1 conference 1 guide 4 +life +2 meta 1 networks 3 +opinion +3 review 2 -story time -1 work 1
Series framework diff --git a/categories/story-time/index.xml b/categories/story-time/index.xml deleted file mode 100644 index 7e4f8f6..0000000 --- a/categories/story-time/index.xml +++ /dev/null @@ -1 +0,0 @@ -story time on /dev/random: A Bit of Everythinghttps://johnhollowell.com/blog/categories/story-time/Recent content in story time on /dev/random: A Bit of EverythingHugo -- gohugo.ioencontact@johnhollowell.com (John Hollowell)contact@johnhollowell.com (John Hollowell)©2023 John Hollowell, All Rights ReservedSun, 24 Dec 2023 01:32:12 +0000Unraveling the Mystery of NFS Hangs, or How The (Hybrid) Cloud is a Painhttps://johnhollowell.com/blog/posts/aws-tg-mtu/Sun, 24 Dec 2023 01:32:12 +0000contact@johnhollowell.com (John Hollowell)Sun, 24 Dec 2023 01:32:12 +0000https://johnhollowell.com/blog/posts/aws-tg-mtu/The Tale Begins There I was, triaging a new issue that came in. A Linux VM running in the cloud was hanging when we started trying to run our workload on it. Huh, there was no output at all from the python script; it didn’t even create its log file, one of the first things it should do. Logging into the cloud instance, I looked around and noticed there was a python process running for the script we started, so the connection to the host and creating the python process at least worked.John HollowellcloudAWSnetworksworkstory time \ No newline at end of file diff --git a/categories/story-time/page/1/index.html b/categories/story-time/page/1/index.html deleted file mode 100644 index 2dd714a..0000000 --- a/categories/story-time/page/1/index.html +++ /dev/null @@ -1,2 +0,0 @@ -https://johnhollowell.com/blog/categories/story-time/ - \ No newline at end of file diff --git a/categories/work/index.html b/categories/work/index.html index 5ae2dc7..a9a05a2 100644 --- a/categories/work/index.html +++ b/categories/work/index.html @@ -21,6 +21,8 @@ 1 backup 1 +clemson +3 cloud 1 cluster @@ -29,18 +31,18 @@ 1 development 1 +gaming +1 hardware -3 +4 life -6 +4 Linux 1 memory 1 networks 3 -opinion -3 proxmox 1 SSH @@ -53,20 +55,20 @@ 1
Categories 101 2 -clemson -1 conference 1 guide 4 +life +2 meta 1 networks 3 +opinion +3 review 2 -story time -1 work 1
Series framework diff --git a/categories/work/index.xml b/categories/work/index.xml index 55be9d7..422c670 100644 --- a/categories/work/index.xml +++ b/categories/work/index.xml @@ -1 +1 @@ -work on /dev/random: A Bit of Everythinghttps://johnhollowell.com/blog/categories/work/Recent content in work on /dev/random: A Bit of EverythingHugo -- gohugo.ioencontact@johnhollowell.com (John Hollowell)contact@johnhollowell.com (John Hollowell)©2023 John Hollowell, All Rights ReservedSun, 24 Dec 2023 01:32:12 +0000Unraveling the Mystery of NFS Hangs, or How The (Hybrid) Cloud is a Painhttps://johnhollowell.com/blog/posts/aws-tg-mtu/Sun, 24 Dec 2023 01:32:12 +0000contact@johnhollowell.com (John Hollowell)Sun, 24 Dec 2023 01:32:12 +0000https://johnhollowell.com/blog/posts/aws-tg-mtu/The Tale Begins There I was, triaging a new issue that came in. A Linux VM running in the cloud was hanging when we started trying to run our workload on it. Huh, there was no output at all from the python script; it didn’t even create its log file, one of the first things it should do. Logging into the cloud instance, I looked around and noticed there was a python process running for the script we started, so the connection to the host and creating the python process at least worked.John HollowellcloudAWSnetworksworkstory time \ No newline at end of file +work on /dev/random: A Bit of Everythinghttps://johnhollowell.com/blog/categories/work/Recent content in work on /dev/random: A Bit of EverythingHugo -- gohugo.ioencontact@johnhollowell.com (John Hollowell)contact@johnhollowell.com (John Hollowell)©2023 John Hollowell, All Rights ReservedSun, 24 Dec 2023 01:32:12 +0000Unraveling the Mystery of NFS Hangs, or How The (Hybrid) Cloud is a Painhttps://johnhollowell.com/blog/posts/aws-tg-mtu/Sun, 24 Dec 2023 01:32:12 +0000contact@johnhollowell.com (John Hollowell)Sun, 24 Dec 2023 01:32:12 +0000https://johnhollowell.com/blog/posts/aws-tg-mtu/The Tale Begins There I was, triaging a new issue that came in. A Linux VM running in the cloud was hanging when we started trying to run our workload on it. Huh, there was no output at all from the python script; it didn’t even create its log file, one of the first things it should do. Logging into the cloud instance, I looked around and noticed there was a python process running for the script we started, so the connection to the host and creating the python process at least worked.John HollowellcloudAWSnetworkswork \ No newline at end of file diff --git a/index.html b/index.html index fe16da8..bfa5572 100644 --- a/index.html +++ b/index.html @@ -1,5 +1,5 @@ /dev/random: A Bit of Everything -
Framework Followup
Framework Followup
· ☕ 5 min read
After living with the 13" Framework laptop and releases of new specs for the 13" and plans for the 16", I've got some thoughts on my Framework

Framework First Impressions
Framework First Impressions
· ☕ 12 min read
I recently upgraded my laptop to a Framework laptop since my old trusty laptop's screen cracked and a replacement screen cost as much as some new laptops. These are my initial impressions of the laptop's build, performance, and usability.

Masters Degree Takeaways
Masters Degree Takeaways
· ☕ 4 min read
This December I graduated from Clemson University with my Masters degree in Computer Science. There were a lot of things I learned (obviously), but I think the most useful things I learned were not from the course material, but my outside learning and interactions with peers and professors.

ZFS Backups to Files
· ☕ 9 min read
ZFS is a great filesystem which I use on most of my systems and it makes full-drive backups a breeze. However, sometimes I want to backup to a non-ZFS system. These are the steps I use for fast and verified backups to a file on another computer.

Getting Started With Devcontainers
Getting Started With Devcontainers
· ☕ 3 min read
Setting up and maintaining a development environment is hard, especially when you need to destructively test features or libraries.

TIL: AD Forests
TIL: AD Forests
· ☕ 1 min read
Today I Learned about Active Directory Forests

Change Username Without Separate Session
· ☕ 5 min read
Changing a user's username on Linux requires that no processes be running under that user. This makes sense, but what if we only have that user accessible through an SSH connection?

Cluster SSH
Cluster SSH
· ☕ 3 min read
One of the most important parts of a working cluster is the interconnection and communication between nodes. While the networking side will not be covered now, a very important aspect will be: passwordless SSH.


Framework First Impressions

 ·  ☕ 12 min read

I recently upgraded my laptop to a Framework laptop since my old trusty laptop’s screen cracked and a replacement screen cost as much as some new laptops. These are my initial impressions of the laptop’s build, performance, and usability.

Use Case

I have a bit of a minimal use case for my laptop. Since I have a powerful desktop and a fairly performant phone, I don’t need my laptop to be a do-everything device. If I need to do something that requires a lot of performance (gaming, heavy development builds, video transcode, etc), I will use my desktop. If I need to quickly do something, I will use the phone that is always in my pocket or on the desk next to me. My laptop fulfils three main functions: portable large-screen remote access to desktop, couch web-browsing and light development, and media consumption while on the road.

Desktop Remote

My desktop is the main place I install games and software, store some files, and do high-performance tasks. I often need or want to do something on my desktop while not sitting at my desk. Be it from a few meters away on the couch or thousands of kilometers away, I will often remote into my desktop from my laptop. There are not really any specific requirements beyond a large screen, enough CPU performance to decode the remote screen stream, and good enough networking to carry the connection. This is honestly the lowest-performance need for a laptop, but having hardware decode for whatever remote solution I use would provide long battery life for this use case.

Couch Computer

This is the middle-of-the-road use case in terms of requirements. It is mostly web browsing, some light video consumption, and low-demand development/writing (like writing this blog). I use VS Code devcontainers for just about everything, so being able to run Docker and VS Code well is a must. Mostly, this presents as having enough memory for the containers, VS Code (thanks, memory-hungry Electron), and all the extensions I typically use. Occasionally, having some extra performance is nice when building a new dev container (a fast network to pull dependencies, a fast CPU to decompress image layers and compile dependencies, and mostly a fast disk for quick package installation, layer creation, etc.), and it makes getting started contributing to a new project incredibly streamlined.
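For a sense of how little configuration that workflow needs, here is a minimal devcontainer sketch. The file path is the standard .devcontainer/devcontainer.json, but the container name and base image below are placeholder assumptions rather than the configuration I actually use.

    # A minimal sketch; the base image is an assumption, swap in whatever your project needs.
    mkdir -p .devcontainer
    cat > .devcontainer/devcontainer.json <<'EOF'
    {
      "name": "example-dev",
      "image": "mcr.microsoft.com/devcontainers/base:ubuntu",
      "postCreateCommand": "echo devcontainer ready"
    }
    EOF

With that file in place, VS Code can reopen the folder inside the container, so the toolchain and extensions live in the container rather than on the laptop itself.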

On-the-road System

This is the most taxing use case that I have for my laptop. This is everything from Couch Computer and more. Some video transcoding (compressing) of footage I’ve taken, some light (and not-so-light) gaming, and occasionally some heavy network traffic (using my laptop as a portable NAS or sneaker-net).

This is also the use case where the connectivity of the laptop is the most important. From hooking into projectors using HDMI, to needing ethernet for some network troubleshooting, to flashing a Raspberry Pi or reading images from an SD card, the most variability in how I interact with my computers is on the road. The ample expansion/connectivity modules make it easier to have the right connector where I want it, when I want it. Also, the ability to move my ports around means I will never have to do the awkward my-HDMI-is-on-the-wrong-side-for-this-podium dance again. Further, having 4 thunderbolt USB-C ports means that even if there is not an official module for what you want, you can easily connect a dongle or even make your own modules. Always in the data center? Make yourself an RS-232 serial port module for interacting with all the serial consoles on your hardware.

Desktop Replacement

As a bonus use case, I will very, very rarely use my laptop at my desk instead of my desktop. My work laptop usually sits on my desk, plugged into a thunderbolt dock connected to all my peripherals and monitors. Every once in a while, I might use this setup with my personal laptop if I am working on some project that would be too cumbersome to move to my desktop but would benefit from the extra monitors and peripherals.

Build

Form Factor

The Framework is a 13.5" laptop with a 3:2 screen ratio. While I’m used to my previous laptop’s 15" form factor, the added height and higher resolution of the Framework’s screen maintain a good amount of screen real estate. It also provides a more compact body which is more portable and takes up less space on a desk. Weighing in at 4.4 lb, it isn’t a light laptop, but the incredibly sturdy chassis and zero deck flex on the keyboard are reason enough for the bit of extra weight.

Power and Battery

It uses Type-C (USB-PD) for charging via any of the 4 expansion ports when a USB-C expansion module is installed (or, really, you can connect directly to the Type-C ports at the back of the expansion slots). This allows charging from either side of the laptop, which brings great versatility. While writing this, the idle power draw was ~15W at a medium-low screen brightness. Running a benchmark, the draw from the USB-C charger reached ~62W (on a 90W charger). Charging from 0% to ~80% while powered off averaged around 40W. Charging from ~85% to 100% averaged around a 30W draw (~10W to the battery and ~15W to the idle running system).

Keyboard

The keyboard is easy to type on, with ample key spacing and a sensible key layout. I wrote this whole post on the Framework’s keyboard. The keys have good stabilization and a comfortable travel distance. The palm rest areas beside the trackpad are large enough to use, and the keyboard is centered on the chassis so neither hand/wrist is more extended than the other. Overall, an easy keyboard on which to type.

Trackpad

Not much to say about the trackpad, and that is a good thing. The trackpad is a nice size: not so small as to be useless and not so large as to be cumbersome to use. It has a nice tactile click when pressed (which I rarely notice since I mostly tap-to-click rather than use the actual displacement button method of clicking) and a smooth surface which is easy to swipe across. The trackpad’s palm rejection while typing is very good, but the button still functions while movement is disabled. If you place a lot of weight on the insides of your hands while typing, you may need to be careful not to push too hard on the trackpad. The typical multi-touch gestures work correctly, smoothly handling zooming, swiping, and the rest.

Speakers

The speakers on the Framework have impressed me so far. I will use earphones/headphones over speakers most of the time, but the speakers are much better than my previous laptop’s speakers and are a nice, usable option. They are quite loud and even at 100% there is no distortion, clipping, or chassis rattle. Although the speakers are down-firing at the front (user-facing side), they are on the angled bevel of the side so even sitting atop a flat surface the speakers fire out and around the chassis to provide a well-balanced sound profile.

Performance

CPU

My Framework performs well. I got the i5 12th gen variant (i5-1240P, up to 4.4 GHz, 4+8 cores) as a low power yet still performant portable system. Following on the Desktop Remote section above, I very rarely need my laptop to be very performant. What I want most of the time is something that can boost to do a little bit of compute while mostly being a power-efficient system that can run web apps, remote desktop software, and YouTube. The system excels at these tasks. I’ll leave the hard numbers and comparisons to benchmark publications, but the system has done everything (within reason) I’ve thrown at it.

Memory

While it may seem basic, the ability to have socketed memory can’t be ignored in modern laptops. Being able to upgrade and/or expand your system’s memory down the line is one of the simplest ways to give an old machine a boost. However, a lot of new machines are coming out with soldered memory that can’t be upgraded, expanded, or replaced. The availability of 2 SODIMM slots for memory is a great feature for repairability and the longevity of the system.

Cooling and Fan

One disappointing aspect of the Framework is its cooling system and fan. When idle, the fan is inaudible and the user-facing components stay cool. However, even when idle, the bottom chassis panel gets slightly too warm to hold for a long time. On a desk this is not an issue, but on a lap (where the lap in laptop comes from), the heat is a bit too much for bare skin contact, and carrying it hand-held with one hand on the bottom for support is not comfortable. That said, even when running full-tilt under a stress test, the top (keyboard, trackpad, and palm rest areas) stayed cool and comfortable.

The cooling fan, when going at full speed, is loud but does an adequate job of keeping the internals cool and preventing drastic thermal throttling. A concern I had heard from others was with the vent being in the hinge and concerns over the cooling capacity of the system while the screen is closed. After some tests, the hinge cover is shaped to direct the exhaust air out the bottom of the hinge which gives enough airflow to keep the system cool.

WiFi 6E

While I currently don’t have any other WiFi gear which supports 6E to test against, I believe 6 GHz is going to be super useful in the coming years, and having a computer that already supports it is a great feature. And even if it didn’t have a 6E chip in it, the Framework’s WiFi is socketed, which allows for future improvement.

From what I can test, the Framework’s WiFi works well. It gets the maximum speed my Access Point (AP) supports and has very good range. I haven’t noticed any difference in reception between different orientations of the laptop, so the antenna placement seems to be the best it can be.
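For reference, the kind of quick throughput check I mean is something like the following sketch, assuming iperf3 is installed on both the laptop and a wired machine on the same LAN (the hostname is a placeholder):

    # On a wired host on the LAN (placeholder name: wired-host), start a server:
    iperf3 -s
    # On the laptop, run a 30-second test against it over WiFi:
    iperf3 -c wired-host -t 30

Running the test from a few rooms away, and again with the laptop rotated, gives a rough feel for range and antenna sensitivity without any special tooling.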

Usability

I/O

The ability to select the I/O that your laptop has is one of the obvious usability features of the Framework. The ability to have up to 4 USB-C thunderbolt ports is impressive and the various modules to adapt those ports into other common ports is fantastic. My favorite ability so far is just having a USB-C port on both sides of the laptop. When I was searching for a new laptop, few had a Type-C port and even fewer had at least one on both sides. The Framework works well with all the USB-C and thunderbolt docks and dongles that I have used with it.

Battery

Another great usability feature is the long battery life. The combination of an efficient processor and a high-capacity battery makes the Framework able to stay running for hours.

Security, Privacy, and Webcam

For security and privacy, the Framework has several great features. For signing in (on supported OSes), you can use the fingerprint sensor integrated into the power button. While my previous laptop had a Windows Hello capable camera, the fingerprint reader is just about as easy to use, and it works well.

On the webcam, the Framework has physical toggles to independently disable the webcam and the microphone. The toggles have a nice red section visible when disabled, and the camera has a light when it is active. It is really nice to have physical switches for the camera, and since I am using the fingerprint sensor for login (instead of the facial recognition of my previous laptop), I can leave the camera disabled most of the time. The camera is 1080p and does a good enough job with challenging situations like low-light and high-contrast environments.

Screen

The screen is a 2256 x 1504 (3:2) glossy panel. The extra screen real estate is nice for tasks that can make use of the extra vertical space, though media consumption, which is mostly 16:9 or wider, leaves unused space on the screen. The maximum brightness is quite high and is easily visible in direct sunlight. The screen also has an ambient light sensor which can be used for automatic brightness adjustments. However, at least in Windows, the auto brightness works well overall but causes a massive jump in brightness when adjusting to above ~50%. Due to the glossy, highly-reflective screen, bright sun from behind makes it hard to read the screen even at maximum brightness. I’m planning to investigate what matte screen films/protectors are available that I could use to make the screen less reflective. As I will very rarely use my laptop for color-accurate work, a matte screen would be better.

Windows Install and Drivers

One cautionary note revolves around the newer, less common components in the Framework. I installed Windows 10 and, out of the box, the trackpad and WiFi did not work. I had to use an Ethernet dongle (since I did not get the Ethernet Framework module) to download the driver pack from Framework’s website; the drivers did not come down automatically from Windows Update like most other firmware/drivers do. I also tried Ubuntu 22.04, and while it had fully functional WiFi and trackpad out of the box, it did not adjust the screen backlight from the function keys (though I was able to control the brightness manually using the OS settings slider).
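As a workaround sketch for the backlight quirk (my assumption of a generic approach, not something Framework documents specifically): on most Linux systems the backlight can also be driven directly through sysfs, and the device name under /sys/class/backlight varies by hardware (intel_backlight is common on Intel machines).

    # List available backlight devices (the device name below is an assumption)
    ls /sys/class/backlight/
    # Set the brightness to half of the panel's maximum
    max=$(cat /sys/class/backlight/intel_backlight/max_brightness)
    echo $(( max / 2 )) | sudo tee /sys/class/backlight/intel_backlight/brightness

A desktop environment's brightness slider ultimately writes to the same interface, so this is effectively the manual version of what the OS settings panel does.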

Overall Impressions

Overall, I really like my Framework laptop so far. I did not think I would like the smaller size, but setting the display scaling lower than the default of 200% (I’m testing between 175% and 150%) gives more than enough screen space for the tasks I need to do on my laptop. After writing this whole post on the keyboard, both on a couch and at a desk, I find it comfortable to type on and quick to pick up touch typing on. It is small and portable while having good performance, battery longevity, and screen real estate. I wish it were a bit bigger, as I like a laptop with a larger screen, but the screen fills nearly 100% of the chassis footprint. With an 11-in-1 USB dongle, it has as much or more connectivity than my desktop. It works flawlessly with thunderbolt docks (at least the ones I have tested). The first install of Windows 10 was a little painful, having to install the driver bundle, but that is a small, one-time price to pay for a nice machine on an old OS.

9.5/10. Would recommend.

\ No newline at end of file diff --git a/posts/framework-followup/index.html b/posts/framework-followup/index.html index 8580a11..804593e 100644 --- a/posts/framework-followup/index.html +++ b/posts/framework-followup/index.html @@ -12,6 +12,6 @@ solarized kimbieabout archive -posts

Framework Followup

 ·  ☕ 5 min read

I’ll start off by saying I love my Framework laptop. The transition from my old 15" laptop to this 13" Framework has been a lot more seamless than I thought it would be. It has worked perfectly for everything I’ve put it through.

My Experience With My Framework

Battery Life

Even compared with the recently-replaced battery in my old laptop, my Framework has a much longer battery life. Thanks to a combination of both the battery and the processor, I’m able to get many hours out of even a demanding workload. I’m able to have Discord open in a video call for hours while having many other browser tabs or games running, without worrying about where my charger is.

Lap-ability

The one loss from moving from a 15" laptop to a 13" laptop is the lessened ability to use it effectively on my lap while cords are connected. The smaller size of the 13" means that it sits more between my legs rather than fully on top of them. This is normally fine, especially since the fan vents to the rear rather than to the right or left so my legs aren’t getting blasted with heat, but it does make having cables connected to the side ports difficult and strains the cables’ connectors.

Thankfully, I typically only need to have my charger connected to my laptop, so I found a solution. Since my charger is a Type-C charger, I can just pop out one of my modules and directly connect the charger’s cable to the deeply-inset Type-C port behind where the module would go. This way only the small cable is pressed against my leg, and no strain is put on the cable or its connector.

Charging Fan

One thing that has disappointed me about my Framework is the leaf blower it turns into when plugged in to charge (when the battery is discharged). I think the combination of moving from the “Better Battery” Windows power profile on battery to “Best Performance” when plugged in, plus the extra heat from the high-speed charging, means the fan kicks up to be quite loud when plugging in. I have not played around much with power profiles to try to reduce this, but it typically only lasts for a short time, and I almost always prefer the better performance over a brief bit of ignorable noise.

Physical Camera/Microphone Switches

I didn’t think this would be a big thing, but it is really nice to be able to have confidence that at the hardware level, my mic and camera are not able to be accessed.

E Cores

As I have a wide, eclectic collection of software I run on a regular basis, I was pleased to not run into many issues with programs not properly understanding or scheduling around the efficiency cores on the 12th gen Intel processor. There are some tools (e.g. zstd) which don’t properly detect the cores to use. However, this could be due to running some of these quirky tools in WSL and to how some tools try to detect hyper-threading so they can schedule themselves only on physical cores.
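As a minimal workaround sketch (my own assumption about how to sidestep the detection issue, not something I have needed to formalize): tell zstd explicitly how many threads to use instead of relying on its automatic core detection. The file name below is a placeholder.

    # zstd's automatic core detection (-T0) can misfire under WSL, so pass an
    # explicit thread count taken from nproc instead.
    zstd -T"$(nproc)" -o big-file.zst big-file

The same idea applies to most multi-threaded CLI tools: an explicit thread-count flag beats auto-detection when the environment reports cores oddly.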

FOMO?

Now that 13th gen Intel and AMD mainboards have come out for the 13" Framework, do I feel like I am missing out or should have waited? Not at all. If I had needed a laptop after the 13th gen had come out, I would definitely have chosen the 13th gen mainboard, but I am happy with what I have. Especially since I rarely have a use case for a high-performance laptop, I’m very comfortable with my 12th gen.

Part of the appeal of the Framework is that I don’t have to have as much of a fear of missing out. The new laptops all have the same hardware outside of the mainboard. If I want a 13th gen laptop, I can easily upgrade my existing laptop to the 13th gen and get a 12th gen computer to use as a server, media PC, etc. And if I keep my laptop for long enough that the hardware is wearing out, I can replace the parts that are broken (or of which I want an improved version) and keep all the remaining parts, reducing the cost of repair and keeping still-good parts from ending up e-waste.

As for regrets about getting the Framework rather than some other, newer system, I have none. I have not stayed as up-to-date with the laptop scene since I’m not currently in need of a new one, but the systems that I have seen have not presented any better features or performance for my use cases. Some of the new Apple laptops have been interesting to follow, but I’m not a big fan of many aspects of Apple’s hardware and ecosystem, and I still come across some software that is not compiled for ARM (a big one being Windows). I love ARM and use it quite a bit in my homelab (mostly Raspberry Pis), but for my main system it is just not quite universal enough for a daily driver.

Conclusion

Overall, I’m very happy with my Framework and would absolutely recommend it to others. Yes, it is more expensive than other laptops with comparable specs, but the Framework’s build quality is superb. If you treat laptops as more disposable, the Framework may not be for you (and that is okay), but I value the goals of the project and truly expect to get my money’s worth out of its repairability and modularity.

\ No newline at end of file diff --git a/tags/gaming/index.xml b/tags/gaming/index.xml new file mode 100644 index 0000000..ee3ddf0 --- /dev/null +++ b/tags/gaming/index.xml @@ -0,0 +1,3 @@ +gaming on /dev/random: A Bit of Everythinghttps://johnhollowell.com/blog/tags/gaming/Recent content in gaming on /dev/random: A Bit of EverythingHugo -- gohugo.ioencontact@johnhollowell.com (John Hollowell)contact@johnhollowell.com (John Hollowell)©2023 John Hollowell, All Rights ReservedTue, 26 Dec 2023 16:53:01 +00006 Months with the Steam Deckhttps://johnhollowell.com/blog/posts/steamdeck/Tue, 26 Dec 2023 16:53:01 +0000contact@johnhollowell.com (John Hollowell)Tue, 26 Dec 2023 16:53:01 +0000https://johnhollowell.com/blog/posts/steamdeck/I’ve had my Steam Deck (256GB version) for a bit over 6 months now and I love it! Being a mostly keyboard-and-mouse gamer, that was a bit of a surprise to me. +Unboxing Experience Opening the Steam Deck was an easy process. The packaging has fun Valve-y designs and contains the contents well without using unneeded extra packaging. +Setup (or lack thereof) The initial setup process was as simple as possible.John Hollowellhardwaregaming \ No newline at end of file diff --git a/tags/gaming/page/1/index.html b/tags/gaming/page/1/index.html new file mode 100644 index 0000000..117723c --- /dev/null +++ b/tags/gaming/page/1/index.html @@ -0,0 +1,2 @@ +https://johnhollowell.com/blog/tags/gaming/ + \ No newline at end of file diff --git a/tags/hardware/index.html b/tags/hardware/index.html index 8fe30bc..d4e01cb 100644 --- a/tags/hardware/index.html +++ b/tags/hardware/index.html @@ -1,5 +1,5 @@ hardware – /dev/random: A Bit of Everything -
hardware
Framework Followup
Framework Followup
· ☕ 5 min read
After living with the 13" Framework laptop and releases of new specs for the 13" and plans for the 16", I've got some thoughts on my Framework

Framework First Impressions
Framework First Impressions
· ☕ 12 min read
I recently upgraded my laptop to a Framework laptop since my old trusty laptop's screen cracked and a replacement screen cost as much as some new laptops. These are my initial impressions of the laptop's build, performance, and usability.

Basic Cluster Setup
Basic Cluster Setup
· ☕ 5 min read
The basics of getting a cluster of Raspberry Pis powered on and running. Full cluster configuration in later posts.