<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0, viewport-fit=cover">
<title>Nate's Portfolio</title>
<link rel="icon" type="image/svg+xml"
href="data:image/svg+xml,<svg xmlns=%22http://www.w3.org/2000/svg%22 viewBox=%220 0 100 100%22><text y=%22.9em%22 font-size=%2290%22 font-family=%22Optima, Iowan Old Style, Seravek, sans-serif%22 fill=%22%23ff4d8d%22>N</text></svg>">
<link rel="stylesheet" href="./style.css">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.5.1/css/all.min.css">
</head>
<body>
<div class="page-wrapper">
<header class="hero">
<h1>Nathan Lodder</h1>
<p>Robotics, physical and mathematical modelling, and human-computer interfacing</p>
</header>
<main class="landing-container">
<nav class="centered-menu">
<ul class="menu-list">
<li><a href="https://www.linkedin.com/in/nathan-lodder/" target="_blank" class="social-link">
<i class="fa-brands fa-linkedin-in"></i>
<span>linkedin</span>
</a>
</li>
<li><a href="https://github.com/nlodder" target="_blank" class="social-link">
<i class="fa-brands fa-github"></i>
<span>github</span>
</a>
</li>
</ul>
</nav>
<!-- PET RESCUE ROBOT PROJECT -->
<section class="project-block">
<div class="project-media">
<div class="media-scroller">
<figure class="media-item">
<video autoplay muted loop playsinline poster="/assets/images/robotSum/PetRescDump.jpeg">
<source src="/assets/videos/robotSum/PetRescRobotTwoPet.mp4" type="video/mp4">
Your browser does not support the video tag.
</video>
<figcaption>1. Robot collecting pets</figcaption>
</figure>
<figure class="media-item">
<img src="/assets/images/robotSum/PetRescSide.jpeg" alt="Perspective view of robot" loading="lazy">
<figcaption>2. Perspective View</figcaption>
</figure>
<figure class="media-item">
<img src="/assets/images/robotSum/servoPower.jpg" alt="Servo power diagram" loading="lazy">
<figcaption>3. Servo Power Diagram</figcaption>
</figure>
<figure class="media-item">
<img src="/assets/images/robotSum/magnetometer.png" alt="Magnetometer diagram" loading="lazy">
<figcaption>4. Magnetometer Functional Overview</figcaption>
</figure>
<figure class="media-item">
<img src="/assets/images/robotSum/PetRescDump.jpeg" alt="Robot collecting pet" loading="lazy">
<figcaption>5. Robot Collecting Pet</figcaption>
</figure>
<figure class="media-item">
<img src="/assets/images/robotSum/PetRescSide2.jpeg" alt="Side view of robot" loading="lazy">
<figcaption>6. Side View</figcaption>
</figure>
</div>
<div class="scroller-dots">
<span class="dot active"></span>
<span class="dot"></span>
<span class="dot"></span>
<span class="dot"></span>
<span class="dot"></span>
<span class="dot"></span>
</div>
</div>
<div class="project-info">
<h2 class="project-header">Pet Rescue Robot</h2>
<ul class="project-tools">
<li class="project-tool-header">Tools</li>
<li>C++</li>
<li>ESP32</li>
<li>LiDAR Sensors</li>
<li>Magnetometer Sensors</li>
<li>Servo Motors</li>
<li>OnShape</li>
</ul>
<ul class="project-skills">
<li class="project-skill-header">Skills</li>
<li>Object Oriented Programming</li>
<li>3D CAD</li>
<li>Circuit Design</li>
<li>Sensor Characterization</li>
<li>Prototyping</li>
<li>I2C Protocol</li>
<li>UART Protocol</li>
</ul>
<p class="project-description">
As part of a team of four, I designed, built, and competed with an autonomous robot capable
of seeking out and rescuing small stuffed animals (“pets”) from an obstacle-filled environment.
Our robot placed 2nd out of 15 teams in the final course competition.<br><br>
We integrated a range of hardware and control systems:<br>
- LiDAR for initial target detection and navigation support<br>
- Infrared reflectance sensor array in a PID feedback loop for line-following navigation<br>
- Dual triple-axis magnetometers to guide the robotic claw toward magnetic
targets<br>
- ESP32 microcontrollers, servos, DC motors, and other fundamental drive and control
components<br>
- I2C and UART protocols for sensor and inter-microcontroller communication, respectively<br><br>
The robot was powered by three lithium-polymer batteries, and its chassis, storage bay, and
arm were built from laser-cut hardboard and 3D-printed PLA components designed in OnShape.
</p>
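<!--
  Source-level note: an illustrative Python sketch of the PID line-following
  loop described above. The robot itself ran C++ on the ESP32; the gains and
  helper names here are hypothetical placeholders, not the competition code.

  KP, KI, KD = 0.8, 0.01, 0.2  # illustrative gains

  def line_follow_step(sensors, state):
      # Weighted centroid of the IR reflectance readings gives the lateral
      # error; 0 means the line is centred under the array.
      half = len(sensors) // 2
      weights = range(-half, half + 1)
      total = sum(sensors)
      error = sum(w * s for w, s in zip(weights, sensors)) / total if total else 0.0
      state['integral'] += error
      derivative = error - state['prev_error']
      state['prev_error'] = error
      correction = KP * error + KI * state['integral'] + KD * derivative
      # Differential drive: steer by biasing the two wheel speeds.
      base = 0.5
      return base - correction, base + correction
-->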
</div>
</section>
<!-- ROBOTICS SIMULATION -->
<section class="project-block">
<div class="project-media">
<div class="media-scroller">
<figure class="media-item">
<img src="/assets/images/enph353/debugWindow.png" alt="Navigation debugging view" loading="lazy">
<figcaption>1. Navigation debugging view</figcaption>
</figure>
<figure class="media-item">
<img src="/assets/images/enph353/rpyControl.png" alt="Roll and pitch control flow" loading="lazy">
<figcaption>2. Roll and pitch control flow</figcaption>
</figure>
<figure class="media-item">
<img src="/assets/images/enph353/flyingDrone.png" alt="Drone in simulation" loading="lazy">
<figcaption>3. Drone in simulation</figcaption>
</figure>
<figure class="media-item">
<img src="/assets/images/enph353/blueMask.png" alt="Image processing-blue mask" loading="lazy">
<figcaption>4. Image processing-blue mask</figcaption>
</figure>
<figure class="media-item">
<img src="/assets/images/enph353/morphology.png" alt="Image processing-morphological closing" loading="lazy">
<figcaption>5. Image processing-morphological closing</figcaption>
</figure>
<figure class="media-item">
<img src="/assets/images/enph353/signHull.png" alt="Image processing-contours & convex hull" loading="lazy">
<figcaption>6. Image processing-contours & convex hull</figcaption>
</figure>
</div>
<div class="scroller-dots">
<span class="dot active"></span>
<span class="dot"></span>
<span class="dot"></span>
<span class="dot"></span>
<span class="dot"></span>
<span class="dot"></span>
</div>
</div>
<div class="project-info">
<h2 class="project-header">Simulated Drone Control</h2>
<ul class="project-tools">
<li class="project-tool-header">Tools</li>
<li>Python</li>
<li>OpenCV</li>
<li>ROS</li>
<li>Gazebo</li>
<li>OnShape</li>
<li>GitHub</li>
</ul>
<ul class="project-skills">
<li class="project-skill-header">Skills</li>
<li>Networking</li>
<li>Bash Scripting</li>
<li>Drone Control</li>
<li>Version Control</li>
</ul>
<a href="https://github.com/nlodder/enph353-drone-controllers" target="_blank" class="social-link">
<i class="fa-brands fa-github"></i>
<span>drone controllers</span>
</a>
<a href="https://github.com/nlodder/enph353-drone-bringup" target="_blank" class="social-link">
<i class="fa-brands fa-github"></i>
<span>drone launcher</span>
</a>
<a href="https://github.com/nlodder/enph353-drone-desc" target="_blank" class="social-link">
<i class="fa-brands fa-github"></i>
<span>drone definition</span>
</a>
<a href="https://github.com/nlodder/enph353-drone-msgs" target="_blank" class="social-link">
<i class="fa-brands fa-github"></i>
<span>drone message</span>
</a>
<p class="project-description">
Created a two-drone team that navigates a simulated ROS/Gazebo environment to collect data from signs.
<br><br>
Controller nodes were implemented in Python and communicated via rostopics. For high-frequency inter-drone communication
I created a custom lightweight message type (see the drone message repository). Controllers and stabilizers
sent movement demands to a command bridge node, which applied all requested forces to the drone. This
isolated ownership of force application to a single node and reduced the number of calls to Gazebo's
costly ApplyBodyWrench service.
<br><br>
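<!--
  A minimal rospy sketch of the command-bridge pattern described above. The
  topic, node, and body names are assumptions, not the project's identifiers.

  import rospy
  from geometry_msgs.msg import Wrench
  from gazebo_msgs.srv import ApplyBodyWrench

  rospy.init_node('command_bridge')
  rospy.wait_for_service('/gazebo/apply_body_wrench')
  apply_wrench = rospy.ServiceProxy('/gazebo/apply_body_wrench', ApplyBodyWrench)

  pending = Wrench()  # running sum of force demands since the last tick

  def on_demand(msg):
      # Accumulate rather than forward: one service call per tick, not per demand.
      pending.force.x += msg.force.x
      pending.force.y += msg.force.y
      pending.force.z += msg.force.z

  rospy.Subscriber('/drone/wrench_demand', Wrench, on_demand)

  rate = rospy.Rate(50)
  while not rospy.is_shutdown():
      apply_wrench(body_name='drone::base_link', wrench=pending,
                   duration=rospy.Duration(0.02))
      pending.force.x = pending.force.y = pending.force.z = 0.0
      rate.sleep()
-->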
Roll and pitch stabilization was handled by a node that took inputs from an IMU link on the drone body and published
required correction velocities (to be read by the command bridge) after passing the roll, pitch and angular velocity
values through a PID function.
<br><br>
I processed the front camera feed with OpenCV for navigation. I used the offset of the horizontal centroid of
blue signs from the centre of the frame as the error for the left/right strafing and yaw PID controllers. For forward/backward movement
I used the relative height of the sign in the frame, pushing the drone back if the sign appeared too large and
forward if it appeared too small, again through a PID function.
<br><br>
The base drone was designed in OnShape and converted to a URDF file with the
<a href="https://onshape-to-robot.readthedocs.io/en/latest/">onshape-to-robot plugin</a>. I wrote a Python script to convert
the .urdf file to a .xacro file for cleanliness and better handling of repeated parts. This file was then modified as
controller development required (adding sensors, modifying drone colors, etc.).
</p>
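<!--
  A Python/OpenCV sketch of the two error terms described above. The HSV bounds
  and the target sign height are illustrative values, not the tuned ones.

  import cv2
  import numpy as np

  def sign_errors(frame_bgr):
      # Mask blue sign pixels in HSV.
      hsv = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2HSV)
      mask = cv2.inRange(hsv, (100, 120, 60), (130, 255, 255))
      m = cv2.moments(mask)
      if m['m00'] == 0:
          return None  # no sign visible
      # Horizontal centroid offset from frame centre drives the strafe/yaw PIDs.
      strafe_error = m['m10'] / m['m00'] - frame_bgr.shape[1] / 2
      # Apparent sign height vs. a target height drives the forward/back PID.
      rows = np.where(mask.any(axis=1))[0]
      height_error = (rows[-1] - rows[0]) - 120  # 120 px target height
      return strafe_error, height_error
-->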
</div>
</section>
<!-- DESIGN TEAM PROJECT -->
<section class="project-block">
<div class="project-media">
<div class="media-scroller">
<figure class="media-item">
<video autoplay muted loop playsinline poster="/assets/images/enable/bannerOverall.jpeg">
<source src="/assets/videos/enable/overallSetupLite.mov" type="video/mp4">
Your browser does not support the video tag.
</video>
<figcaption>1. Sensor overlay on camera feed prototype setup</figcaption>
</figure>
<figure class="media-item">
<video autoplay muted loop playsinline poster="/assets/images/enable/bannerUltra.jpeg">
<source src="/assets/videos/enable/ultra.mov" type="video/mp4">
Your browser does not support the video tag.
</video>
<figcaption>2. Ultrasonic sensor prototype demo</figcaption>
</figure>
<figure class="media-item">
<img src="/assets/images/enable/setup.jpeg" alt="Raspberry Pi setup" loading="lazy">
<figcaption>3. Raspberry Pi and ultrasonic sensors prototyping</figcaption>
</figure>
<figure class="media-item">
<img src="/assets/images/enable/guiOverlay.jpeg" alt="GUI overlay" loading="lazy">
<figcaption>4. Distance data overlay on camera feed</figcaption>
</figure>
</div>
<div class="scroller-dots">
<span class="dot active"></span>
<span class="dot"></span>
<span class="dot"></span>
<span class="dot"></span>
</div>
</div>
<div class="project-info">
<h2 class="project-header">Wheelchair Backup Assist Proximity Overlay</h2>
<ul class="project-tools">
<li class="project-tool-header">Tools</li>
<li>Python</li>
<li>Raspberry Pi</li>
<li>Raspberry Pi OS Linux</li>
<li>Ultrasonic Sensors</li>
<li>Data Processing</li>
</ul>
<ul class="project-skills">
<li class="project-skill-header">Skills</li>
<li>Software Construction</li>
<li>Command Line Interfacing</li>
<li>Sensor Characterization</li>
<li>Prototyping</li>
</ul>
<p class="project-description">
As part of the Electrical Sub-team at UBC BEST-Enable, I programmed a Raspberry Pi
to stream video from a Pi camera to a small monitor with a proximity-indicator
overlay, giving a client in an electric wheelchair visibility and proximity
feedback when reversing in tight environments.<br><br>
This was my first introduction to working on a Linux system and debugging
from the command line. Our primary struggle was interfacing with the
external display to create a clean and effective stream for our client; we
overcame this by searching the OpenCV documentation for potential
solutions, landing on the Python OpenCV <a href="https://docs.opencv.org/4.x/d7/dfc/group__highgui.html#ga5afdf8410934fd099df85c75b2e0888b">namedWindow()</a> method.
</p>
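<!--
  A minimal Python/OpenCV sketch of the fullscreen overlay described above.
  read_ultrasonic() is a stub standing in for the real sensor driver.

  import cv2

  def read_ultrasonic():
      return 50.0  # stub; the real code read the ultrasonic sensors

  # namedWindow + the fullscreen property puts the feed on the external display.
  cv2.namedWindow('backup', cv2.WINDOW_NORMAL)
  cv2.setWindowProperty('backup', cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)

  cap = cv2.VideoCapture(0)
  while True:
      ok, frame = cap.read()
      if not ok:
          break
      # Draw the proximity reading onto each frame before showing it.
      cv2.putText(frame, '%.0f cm' % read_ultrasonic(), (20, 40),
                  cv2.FONT_HERSHEY_SIMPLEX, 1.2, (0, 255, 0), 2)
      cv2.imshow('backup', frame)
      if cv2.waitKey(1) == 27:  # Esc quits
          break
-->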
</div>
</section>
<!-- VEXBOT PROJECT-->
<section class="project-block">
<div class="project-media">
<div class="media-scroller">
<figure class="media-item">
<img src="/assets/images/vexbot/BallCollRob4Balls.jpeg" alt="Vexbot holding four balls"
loading="lazy">
<figcaption>1. Balls Collected</figcaption>
</figure>
<figure class="media-item">
<img src="/assets/images/vexbot/BallCollRobFront.jpeg" alt="Front of Vexbot" loading="lazy">
<figcaption>2. Front View</figcaption>
</figure>
<figure class="media-item">
<img src="/assets/images/vexbot/BallCollRob-P1.jpeg" alt="Perspective view of Vexbot"
loading="lazy">
<figcaption>3. Perspective View</figcaption>
</figure>
<figure class="media-item">
<img src="/assets/images/vexbot/BallCollRob-P2.jpeg" alt="Perspective view of Vexbot"
loading="lazy">
<figcaption>4. Perspective View</figcaption>
</figure>
</div>
<div class="scroller-dots">
<span class="dot active"></span>
<span class="dot"></span>
<span class="dot"></span>
<span class="dot"></span>
</div>
</div>
<div class="project-info">
<h2 class="project-header">Vexbot</h2>
<ul class="project-tools">
<li class="project-tool-header">Tools</li>
<li>C++</li>
<li>Vex Vision Sensors</li>
<li>Vex Motors</li>
<li>Vex Distance Sensors</li>
</ul>
<ul class="project-skills">
<li class="project-skill-header">Skills</li>
<li>Object-Oriented Programming</li>
<li>Sensor Characterization</li>
<li>Prototyping</li>
</ul>
<p class="project-description">
I designed and built a Vexbot that used a vision sensor's colour-recognition capability to identify
and collect coloured balls. The robot then used the same sensor to scan for and
identify a delivery location, and a distance sensor to detect when
it was close enough to drop off the balls.
</p>
</div>
</section>
<!-- OBSIDIAN PLUGIN -->
<section class="project-block">
<div class="project-media">
<div class="media-scroller">
<figure class="media-item">
<video autoplay muted loop playsinline poster="/assets/images/obsidian/code.png">
<source src="/assets/videos/obsidian/vid.mp4" type="video/mp4">
Your browser does not support the video tag.
</video>
<figcaption>1. Demo of plugin</figcaption>
</figure>
<figure class="media-item">
<img src="/assets/images/obsidian/plot.png" alt="dark mode embedded plot in Obsidian"
loading="lazy">
<figcaption>2. Dark mode interactive plot</figcaption>
</figure>
<figure class="media-item">
<img src="/assets/images/obsidian/plot-light.png"
alt="light mode embedded plot in Obsidian" loading="lazy">
<figcaption>3. Light mode interactive plot</figcaption>
</figure>
<figure class="media-item">
<img src="/assets/images/obsidian/code.png" alt="code block for plot in Obsidian"
loading="lazy">
<figcaption>4. Code block for plot in Obsidian</figcaption>
</figure>
</div>
<div class="scroller-dots">
<span class="dot active"></span>
<span class="dot"></span>
<span class="dot"></span>
<span class="dot"></span>
</div>
</div>
<div class="project-info">
<h2 class="project-header">Obsidian Interactive Plot Plugin</h2>
<ul class="project-tools">
<li class="project-tool-header">Tools</li>
<li>CSS</li>
<li>JavaScript</li>
<li>Python</li>
</ul>
<ul class="project-skills">
<li class="project-skill-header">Skills</li>
<li>Object-Oriented Programming</li>
<li>Event-Driven Architecture</li>
<li>Inter-Process Communication</li>
<li>Resource Management</li>
</ul>
<a href="https://github.com/nlodder/local-python-obsidian" target="_blank" class="social-link">
<i class="fa-brands fa-github"></i>
<span>github repository</span>
</a>
<p class="project-description">
I developed a custom Obsidian plugin that integrates a live Python
execution environment directly into Markdown notes. Designed to replace
static hand-drawn plots with interactive data visualizations, the plugin
uses Node.js child processes to bridge Obsidian with the
Python/Plotly ecosystem. It features a custom CSS-to-Python theme engine
that dynamically injects Obsidian's UI variables into the rendering pipeline,
ensuring that plots automatically adapt their color palettes
during theme shifts (see photos) for a native user experience.<br><br>
I wrote this plugin to improve my knowledge management environment and engineering
and learning workflows. I plan to add more quality-of-life features, such
as quick-plot shortcuts and other UI controls (sliders, variable manipulators, etc.),
as time permits.<br><br>
NOTE: There is currently an issue where the plugin does not render regular Python code
blocks, only rendering when the tag is 'python local-py'. I am actively working
on resolving this issue.<br><br>
NOTE: Development is on hold while I determine how to protect against recent and anticipated
supply-chain vulnerabilities in Python dependencies.
</p>
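<!--
  A sketch of the Python end of the theme bridge described above: the Node.js
  child process passes resolved CSS variables to the renderer. The JSON keys
  and stdin transport here are assumptions, not the plugin's actual schema.

  import json, sys
  import plotly.graph_objects as go

  theme = json.load(sys.stdin)  # e.g. {"background": "#1e1e1e", "text": "#dadada"}

  fig = go.Figure(go.Scatter(x=[0, 1, 2], y=[0, 1, 4]))
  fig.update_layout(
      paper_bgcolor=theme.get('background', '#1e1e1e'),
      plot_bgcolor=theme.get('background', '#1e1e1e'),
      font_color=theme.get('text', '#dadada'),
  )
  sys.stdout.write(fig.to_html(include_plotlyjs='cdn'))
-->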
</div>
</section>
</main>
<footer>
<p>2026 | Nathan Lodder</p>
</footer>
</div>
<script>
// Media scroller logic
document.querySelectorAll('.media-scroller').forEach((scroller) => {
let isScrolling = false;
const container = scroller.closest('.project-media');
const dotsContainer = container?.querySelector('.scroller-dots');
const items = scroller.querySelectorAll('.media-item');
// Helper to get exact scroll distance between items
const getScrollStep = () => {
if (items.length <= 1) return scroller.clientWidth;
return items[1].offsetLeft - items[0].offsetLeft;
};
// A. Click on dots to jump to image
if (dotsContainer) {
const dots = dotsContainer.querySelectorAll('.dot');
dots.forEach((dot, i) => {
dot.addEventListener('click', () => {
scroller.scrollTo({
left: i * getScrollStep(),
behavior: 'smooth'
});
});
});
}
// B. Wheel Scroll Logic (Desktop)
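// passive: false (set below) lets preventDefault() cancel the page's own vertical scroll while the cursor is over the scroller.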
scroller.addEventListener('wheel', (evt) => {
if (window.innerWidth > 768) {
// Only prevent default if we are scrolling horizontally
if (Math.abs(evt.deltaY) > Math.abs(evt.deltaX)) {
evt.preventDefault();
if (isScrolling) return;
const threshold = 10;
if (Math.abs(evt.deltaY) > threshold) {
isScrolling = true;
const direction = evt.deltaY > 0 ? 1 : -1;
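// Advance exactly one item; isScrolling debounces further wheel events until the smooth scroll has settled (timeout below).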
scroller.scrollBy({
left: getScrollStep() * direction,
behavior: 'smooth'
});
setTimeout(() => { isScrolling = false; }, 500);
}
}
}
}, { passive: false });
// C. Update Dots on Scroll
scroller.addEventListener('scroll', () => {
if (!dotsContainer || items.length <= 1) return;
const dots = dotsContainer.querySelectorAll('.dot');
const index = Math.round(scroller.scrollLeft / getScrollStep());
dots.forEach((dot, i) => dot.classList.toggle('active', i === index));
});
});
</script>
</body>
</html>