summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--11/Cargo.toml15
-rw-r--r--11/input.txt10
-rw-r--r--11/src/part-1.rs76
-rw-r--r--11/src/part-2.rs62
-rw-r--r--11/src/tensor.rs175
-rw-r--r--11/test-2.txt5
-rw-r--r--11/test.txt10
7 files changed, 353 insertions, 0 deletions
diff --git a/11/Cargo.toml b/11/Cargo.toml
new file mode 100644
index 0000000..53c5fdb
--- /dev/null
+++ b/11/Cargo.toml
@@ -0,0 +1,15 @@
+[package]
+name = "day-11"
+version = "0.1.0"
+authors = ["Gard Spreemann <gspr@nonempty.org>"]
+edition = "2021"
+
+[[bin]]
+name = "part-1"
+path = "src/part-1.rs"
+
+[[bin]]
+name = "part-2"
+path = "src/part-2.rs"
+
+[dependencies]
diff --git a/11/input.txt b/11/input.txt
new file mode 100644
index 0000000..c93f06a
--- /dev/null
+++ b/11/input.txt
@@ -0,0 +1,10 @@
+4525436417
+1851242553
+5421435521
+8431325447
+4517438332
+3521262111
+3331541734
+4351836641
+2753881442
+7717616863
diff --git a/11/src/part-1.rs b/11/src/part-1.rs
new file mode 100644
index 0000000..163787a
--- /dev/null
+++ b/11/src/part-1.rs
@@ -0,0 +1,76 @@
+mod tensor;
+
+use std::io::{BufRead};
+
+pub type Matrix<T> = tensor::Tensor<T, 2>;
+pub type Index = tensor::Index<2>;
+
+
+pub fn main() {
+ let args: Vec<String> = std::env::args().collect();
+ if args.len() != 2 { panic!("Need exactly 1 argument"); }
+ let num_steps: usize = args[1].parse().expect("Need number of steps as argument");
+
+ let stdin = std::io::stdin();
+ let handle = stdin.lock();
+
+ let mut energy: Matrix<usize> = {
+ let mut tmp: Vec<usize> = Vec::new();
+ let mut m: usize = 0;
+ for l in handle.lines() {
+ let line = l.unwrap();
+ tmp.extend(line.chars().map(|c| c.to_digit(10).unwrap() as usize));
+ m += 1;
+ }
+ let n = tmp.len()/m;
+ Matrix::new_from([m, n], tmp)
+ };
+ let m = energy.shape()[0];
+ let n = energy.shape()[1];
+ let mut num_flashes: usize = 0;
+
+ let mut can_flash: Matrix<bool> = Matrix::new(energy.shape().clone(), true);
+ let mut to_increase: Vec<Index> = Vec::new();
+ for s in 0..num_steps {
+ //println!("Step {}.", s);
+
+ can_flash.fill(true);
+ to_increase.clear();
+ for i in 0..m {
+ for j in 0..n {
+ to_increase.push([i, j]);
+ }
+ }
+
+ while let Some(idx) = to_increase.pop() {
+ if can_flash[idx] {
+ energy[idx] += 1;
+ if energy[idx] > 9 {
+ let neighbors: [Index; 8] = [[idx[0] , idx[1].wrapping_sub(1)], [idx[0] , idx[1]+1],
+ [idx[0].wrapping_sub(1) , idx[1] ], [idx[0]+1, idx[1] ],
+ [idx[0].wrapping_sub(1) , idx[1].wrapping_sub(1)], [idx[0]+1, idx[1]+1],
+ [idx[0].wrapping_sub(1) , idx[1]+1 ], [idx[0]+1, idx[1].wrapping_sub(1)] ];
+ energy[idx] = 0;
+ can_flash[idx] = false;
+ num_flashes += 1;
+ for neighbor in (& neighbors).into_iter().filter(|neighbor| energy.in_bounds(neighbor)) {
+ to_increase.push(*neighbor);
+ }
+ }
+ }
+ }
+
+ /*
+ println!("Total flashes: {}", num_flashes);
+ for i in 0..m {
+ for j in 0..n {
+ print!("{}", energy[[i, j]]);
+ }
+ println!("");
+ }
+ println!("....");
+ */
+ }
+
+ println!("Number of flashes: {}", num_flashes);
+}
diff --git a/11/src/part-2.rs b/11/src/part-2.rs
new file mode 100644
index 0000000..3bdb50e
--- /dev/null
+++ b/11/src/part-2.rs
@@ -0,0 +1,62 @@
+mod tensor;
+
+use std::io::{BufRead};
+
+pub type Matrix<T> = tensor::Tensor<T, 2>;
+pub type Index = tensor::Index<2>;
+
+
+pub fn main() {
+ let stdin = std::io::stdin();
+ let handle = stdin.lock();
+
+ let mut energy: Matrix<usize> = {
+ let mut tmp: Vec<usize> = Vec::new();
+ let mut m: usize = 0;
+ for l in handle.lines() {
+ let line = l.unwrap();
+ tmp.extend(line.chars().map(|c| c.to_digit(10).unwrap() as usize));
+ m += 1;
+ }
+ let n = tmp.len()/m;
+ Matrix::new_from([m, n], tmp)
+ };
+ let m = energy.shape()[0];
+ let n = energy.shape()[1];
+
+ let mut can_flash: Matrix<bool> = Matrix::new(energy.shape().clone(), true);
+ let mut to_increase: Vec<Index> = Vec::new();
+ for s in 0.. {
+ can_flash.fill(true);
+ to_increase.clear();
+ for i in 0..m {
+ for j in 0..n {
+ to_increase.push([i, j]);
+ }
+ }
+
+ while let Some(idx) = to_increase.pop() {
+ if can_flash[idx] {
+ energy[idx] += 1;
+ if energy[idx] > 9 {
+ let neighbors: [Index; 8] = [[idx[0] , idx[1].wrapping_sub(1)], [idx[0] , idx[1]+1],
+ [idx[0].wrapping_sub(1) , idx[1] ], [idx[0]+1, idx[1] ],
+ [idx[0].wrapping_sub(1) , idx[1].wrapping_sub(1)], [idx[0]+1, idx[1]+1],
+ [idx[0].wrapping_sub(1) , idx[1]+1 ], [idx[0]+1, idx[1].wrapping_sub(1)] ];
+ energy[idx] = 0;
+ can_flash[idx] = false;
+ for neighbor in (& neighbors).into_iter().filter(|neighbor| energy.in_bounds(neighbor)) {
+ to_increase.push(*neighbor);
+ }
+ }
+ }
+ }
+
+ let e = energy[[0,0]];
+ if energy.data().into_iter().all(|& x| x == e) {
+ println!("Synchronized after step {} (so answer is step {})", s, s+1);
+ break;
+ }
+ }
+
+}
diff --git a/11/src/tensor.rs b/11/src/tensor.rs
new file mode 100644
index 0000000..6898701
--- /dev/null
+++ b/11/src/tensor.rs
@@ -0,0 +1,175 @@
+#![allow(dead_code)]
+
/// Shape of a `D`-dimensional tensor: one length per dimension.
pub type Shape<const D: usize> = [usize; D];
/// Index into a `D`-dimensional tensor; same representation as `Shape`.
pub type Index<const D: usize> = Shape<D>;

/// Dense, row-major, `D`-dimensional array of `Copy` elements.
pub struct Tensor<T, const D: usize> {
    data: Vec<T>,      // Entries in row-major order; length == product of `shape`.
    shape: Shape<D>,   // Per-dimension lengths.
    strides: Shape<D>  // Row-major strides; strides[D-1] == 1 (see `new`).
}

// Convenience aliases for low-dimensional tensors and indices.
pub type Tensor1<T> = Tensor<T, 1>;
pub type Tensor2<T> = Tensor<T, 2>;
pub type Tensor3<T> = Tensor<T, 3>;
pub type Tensor4<T> = Tensor<T, 4>;
pub type Index1 = Index<1>;
pub type Index2 = Index<2>;
pub type Index3 = Index<3>;
pub type Index4 = Index<4>;
+
+
+impl<T: Copy, const D: usize> Tensor<T, D> {
+ pub fn new(shape: Shape<D>, x: T) -> Self {
+ let dim = D;
+ if dim == 0 { panic!("Empty shape not allowed."); }
+
+ let mut len = shape[dim-1];
+ let mut strides: Shape<D> = [0; D];
+ strides[dim-1] = 1;
+ for d in (1..dim).rev() { // d=dim-1, …, 1.
+ strides[d-1] = shape[d]*strides[d];
+ len *= shape[d-1];
+ }
+ if len == 0 { panic!("Empty dimensions not allowed."); }
+
+ Self {
+ data: vec![x; len],
+ shape: shape,
+ strides: strides,
+ }
+ }
+
+ pub fn new_from(shape: Shape<D>, x: Vec<T>) -> Self {
+ let dim = D;
+ if dim == 0 { panic!("Empty shape not allowed."); }
+
+ let mut len = shape[dim-1];
+ let mut strides: Shape<D> = [0; D];
+ strides[dim-1] = 1;
+ for d in (1..dim).rev() { // d=dim-1, …, 1.
+ strides[d-1] = shape[d]*strides[d];
+ len *= shape[d-1];
+ }
+ if len == 0 { panic!("Empty dimensions not allowed."); }
+
+ if len != x.len() { panic!("Vector of length {} cannot fill tensor with {} entries.", x.len(), len); }
+
+ Self {
+ data: x,
+ shape: shape,
+ strides: strides,
+ }
+ }
+
+
+ #[inline(always)]
+ fn flatten_idx(self: & Self, idx: & Index<D>) -> usize {
+ // NOTE: This is a very hot code path. Should benchmark versus explicit loop.
+ idx.iter().zip(self.strides.iter()).fold(0, |sum, (i, s)| sum + i*s)
+ }
+
+ fn bound_check_panic(self: & Self, idx: & Index<D>) -> () {
+ for d in 0..self.dim() {
+ let i = *(unsafe { idx.get_unchecked(d) });
+ if i >= self.shape[d] {
+ panic!("{}-dimensional tensor index is out of bounds in dimension {} ({} >= {}).", self.dim(), d, i, self.shape[d])
+ }
+ }
+ }
+
+ pub fn in_bounds(self: & Self, idx: & Index<D>) -> bool {
+ for d in 0..self.dim() {
+ let i = *(unsafe { idx.get_unchecked(d) });
+ if i >= self.shape[d] {
+ return false;
+ }
+ }
+ true
+ }
+
+ pub fn dim(self: & Self) -> usize { D }
+
+ pub fn shape(self: & Self) -> & Shape<D> { &self.shape }
+
+ pub fn el(self: & Self, idx: & Index<D>) -> Option<& T> {
+ if self.in_bounds(idx) { Some(unsafe { self.el_unchecked(idx) }) }
+ else { None }
+ }
+
+ pub unsafe fn el_unchecked(self: & Self, idx: & Index<D>) -> & T { self.data.get_unchecked(self.flatten_idx(idx)) }
+
+ pub unsafe fn el_unchecked_mut(self: &mut Self, idx: & Index<D>) -> &mut T {
+ let flat_idx = self.flatten_idx(idx);
+ self.data.get_unchecked_mut(flat_idx)
+ }
+
+ pub fn flat_len(self: & Self) -> usize { self.data.len() }
+ pub fn size(self: & Self) -> usize { self.flat_len()*std::mem::size_of::<T>() }
+
+ pub fn fill_with(self: &mut Self, x: & [T]) -> () {
+ // Already panics on size mismatch.
+ self.data.copy_from_slice(x)
+ }
+
+ pub fn fill(self: &mut Self, x: T) -> () {
+ self.data.fill(x);
+ }
+
+ pub fn data(self: & Self) -> & [T] { &self.data }
+}
+
+impl<T: Copy + std::fmt::Display> Tensor<T, 2> {
+ pub fn dirty_print(self: & Self) {
+ for i in 0..self.shape[0] {
+ for j in 0..self.shape[1] {
+ print!("{} ", self[[i, j]]);
+ }
+ println!("");
+ }
+ }
+}
+
+impl<T: Copy, const D: usize> std::ops::Index<Index<D>> for Tensor<T, D> {
+ type Output = T;
+
+ fn index(self: & Self, idx: Index<D>) -> & Self::Output {
+ self.bound_check_panic(&idx);
+ unsafe { self.el_unchecked(&idx) }
+ }
+}
+
+impl<T: Copy, const D: usize> std::ops::Index<& Index<D>> for Tensor<T, D> {
+ type Output = T;
+
+ fn index(self: & Self, idx: & Index<D>) -> & Self::Output {
+ self.bound_check_panic(idx);
+ unsafe { self.el_unchecked(idx) }
+ }
+}
+
+impl<T: Copy, const D: usize> std::ops::IndexMut<Index<D>> for Tensor<T, D> {
+ fn index_mut(self: &mut Self, idx: Index<D>) -> &mut Self::Output {
+ self.bound_check_panic(&idx);
+ unsafe { self.el_unchecked_mut(&idx) }
+ }
+}
+
+impl<T: Copy, const D: usize> std::ops::IndexMut<& Index<D>> for Tensor<T, D> {
+ fn index_mut(self: &mut Self, idx: & Index<D>) -> &mut Self::Output {
+ self.bound_check_panic(idx);
+ unsafe { self.el_unchecked_mut(idx) }
+ }
+}
+
+impl<T: Copy, const D: usize> IntoIterator for Tensor<T, D> {
+ type Item = T;
+ type IntoIter = std::vec::IntoIter<Self::Item>;
+
+ fn into_iter(self) -> Self::IntoIter { self.data.into_iter() }
+}
+
+
+// FIXME: Should have a proper IntoIter implementing IntoIter for &'a Tensor<T, D>.
+
+// Note: Tensor is also sliceable (due to the Index implementations)
diff --git a/11/test-2.txt b/11/test-2.txt
new file mode 100644
index 0000000..ae21dd2
--- /dev/null
+++ b/11/test-2.txt
@@ -0,0 +1,5 @@
+11111
+19991
+19191
+19991
+11111
diff --git a/11/test.txt b/11/test.txt
new file mode 100644
index 0000000..03743f6
--- /dev/null
+++ b/11/test.txt
@@ -0,0 +1,10 @@
+5483143223
+2745854711
+5264556173
+6141336146
+6357385478
+4167524645
+2176841721
+6882881134
+4846848554
+5283751526