From 567ff99d4a77f003aee0c533804d8fbd58ab8d9f Mon Sep 17 00:00:00 2001 From: Gard Spreemann Date: Sat, 18 Dec 2021 14:19:37 +0100 Subject: Day 11 --- 11/Cargo.toml | 15 +++++ 11/input.txt | 10 ++++ 11/src/part-1.rs | 76 ++++++++++++++++++++++++ 11/src/part-2.rs | 62 ++++++++++++++++++++ 11/src/tensor.rs | 175 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ 11/test-2.txt | 5 ++ 11/test.txt | 10 ++++ 7 files changed, 353 insertions(+) create mode 100644 11/Cargo.toml create mode 100644 11/input.txt create mode 100644 11/src/part-1.rs create mode 100644 11/src/part-2.rs create mode 100644 11/src/tensor.rs create mode 100644 11/test-2.txt create mode 100644 11/test.txt diff --git a/11/Cargo.toml b/11/Cargo.toml new file mode 100644 index 0000000..53c5fdb --- /dev/null +++ b/11/Cargo.toml @@ -0,0 +1,15 @@ +[package] +name = "day-11" +version = "0.1.0" +authors = ["Gard Spreemann "] +edition = "2021" + +[[bin]] +name = "part-1" +path = "src/part-1.rs" + +[[bin]] +name = "part-2" +path = "src/part-2.rs" + +[dependencies] diff --git a/11/input.txt b/11/input.txt new file mode 100644 index 0000000..c93f06a --- /dev/null +++ b/11/input.txt @@ -0,0 +1,10 @@ +4525436417 +1851242553 +5421435521 +8431325447 +4517438332 +3521262111 +3331541734 +4351836641 +2753881442 +7717616863 diff --git a/11/src/part-1.rs b/11/src/part-1.rs new file mode 100644 index 0000000..163787a --- /dev/null +++ b/11/src/part-1.rs @@ -0,0 +1,76 @@ +mod tensor; + +use std::io::{BufRead}; + +pub type Matrix = tensor::Tensor; +pub type Index = tensor::Index<2>; + + +pub fn main() { + let args: Vec = std::env::args().collect(); + if args.len() != 2 { panic!("Need exactly 1 argument"); } + let num_steps: usize = args[1].parse().expect("Need number of steps as argument"); + + let stdin = std::io::stdin(); + let handle = stdin.lock(); + + let mut energy: Matrix = { + let mut tmp: Vec = Vec::new(); + let mut m: usize = 0; + for l in handle.lines() { + let line = l.unwrap(); + 
tmp.extend(line.chars().map(|c| c.to_digit(10).unwrap() as usize)); + m += 1; + } + let n = tmp.len()/m; + Matrix::new_from([m, n], tmp) + }; + let m = energy.shape()[0]; + let n = energy.shape()[1]; + let mut num_flashes: usize = 0; + + let mut can_flash: Matrix = Matrix::new(energy.shape().clone(), true); + let mut to_increase: Vec = Vec::new(); + for s in 0..num_steps { + //println!("Step {}.", s); + + can_flash.fill(true); + to_increase.clear(); + for i in 0..m { + for j in 0..n { + to_increase.push([i, j]); + } + } + + while let Some(idx) = to_increase.pop() { + if can_flash[idx] { + energy[idx] += 1; + if energy[idx] > 9 { + let neighbors: [Index; 8] = [[idx[0] , idx[1].wrapping_sub(1)], [idx[0] , idx[1]+1], + [idx[0].wrapping_sub(1) , idx[1] ], [idx[0]+1, idx[1] ], + [idx[0].wrapping_sub(1) , idx[1].wrapping_sub(1)], [idx[0]+1, idx[1]+1], + [idx[0].wrapping_sub(1) , idx[1]+1 ], [idx[0]+1, idx[1].wrapping_sub(1)] ]; + energy[idx] = 0; + can_flash[idx] = false; + num_flashes += 1; + for neighbor in (& neighbors).into_iter().filter(|neighbor| energy.in_bounds(neighbor)) { + to_increase.push(*neighbor); + } + } + } + } + + /* + println!("Total flashes: {}", num_flashes); + for i in 0..m { + for j in 0..n { + print!("{}", energy[[i, j]]); + } + println!(""); + } + println!("...."); + */ + } + + println!("Number of flashes: {}", num_flashes); +} diff --git a/11/src/part-2.rs b/11/src/part-2.rs new file mode 100644 index 0000000..3bdb50e --- /dev/null +++ b/11/src/part-2.rs @@ -0,0 +1,62 @@ +mod tensor; + +use std::io::{BufRead}; + +pub type Matrix = tensor::Tensor; +pub type Index = tensor::Index<2>; + + +pub fn main() { + let stdin = std::io::stdin(); + let handle = stdin.lock(); + + let mut energy: Matrix = { + let mut tmp: Vec = Vec::new(); + let mut m: usize = 0; + for l in handle.lines() { + let line = l.unwrap(); + tmp.extend(line.chars().map(|c| c.to_digit(10).unwrap() as usize)); + m += 1; + } + let n = tmp.len()/m; + Matrix::new_from([m, n], tmp) + }; + 
let m = energy.shape()[0]; + let n = energy.shape()[1]; + + let mut can_flash: Matrix = Matrix::new(energy.shape().clone(), true); + let mut to_increase: Vec = Vec::new(); + for s in 0.. { + can_flash.fill(true); + to_increase.clear(); + for i in 0..m { + for j in 0..n { + to_increase.push([i, j]); + } + } + + while let Some(idx) = to_increase.pop() { + if can_flash[idx] { + energy[idx] += 1; + if energy[idx] > 9 { + let neighbors: [Index; 8] = [[idx[0] , idx[1].wrapping_sub(1)], [idx[0] , idx[1]+1], + [idx[0].wrapping_sub(1) , idx[1] ], [idx[0]+1, idx[1] ], + [idx[0].wrapping_sub(1) , idx[1].wrapping_sub(1)], [idx[0]+1, idx[1]+1], + [idx[0].wrapping_sub(1) , idx[1]+1 ], [idx[0]+1, idx[1].wrapping_sub(1)] ]; + energy[idx] = 0; + can_flash[idx] = false; + for neighbor in (& neighbors).into_iter().filter(|neighbor| energy.in_bounds(neighbor)) { + to_increase.push(*neighbor); + } + } + } + } + + let e = energy[[0,0]]; + if energy.data().into_iter().all(|& x| x == e) { + println!("Synchronized after step {} (so answer is step {})", s, s+1); + break; + } + } + +} diff --git a/11/src/tensor.rs b/11/src/tensor.rs new file mode 100644 index 0000000..6898701 --- /dev/null +++ b/11/src/tensor.rs @@ -0,0 +1,175 @@ +#![allow(dead_code)] + +pub type Shape = [usize; D]; +pub type Index = Shape; + +pub struct Tensor { + data: Vec, + shape: Shape, + strides: Shape +} + +pub type Tensor1 = Tensor; +pub type Tensor2 = Tensor; +pub type Tensor3 = Tensor; +pub type Tensor4 = Tensor; +pub type Index1 = Index<1>; +pub type Index2 = Index<2>; +pub type Index3 = Index<3>; +pub type Index4 = Index<4>; + + +impl Tensor { + pub fn new(shape: Shape, x: T) -> Self { + let dim = D; + if dim == 0 { panic!("Empty shape not allowed."); } + + let mut len = shape[dim-1]; + let mut strides: Shape = [0; D]; + strides[dim-1] = 1; + for d in (1..dim).rev() { // d=dim-1, …, 1. 
+ strides[d-1] = shape[d]*strides[d]; + len *= shape[d-1]; + } + if len == 0 { panic!("Empty dimensions not allowed."); } + + Self { + data: vec![x; len], + shape: shape, + strides: strides, + } + } + + pub fn new_from(shape: Shape, x: Vec) -> Self { + let dim = D; + if dim == 0 { panic!("Empty shape not allowed."); } + + let mut len = shape[dim-1]; + let mut strides: Shape = [0; D]; + strides[dim-1] = 1; + for d in (1..dim).rev() { // d=dim-1, …, 1. + strides[d-1] = shape[d]*strides[d]; + len *= shape[d-1]; + } + if len == 0 { panic!("Empty dimensions not allowed."); } + + if len != x.len() { panic!("Vector of length {} cannot fill tensor with {} entries.", x.len(), len); } + + Self { + data: x, + shape: shape, + strides: strides, + } + } + + + #[inline(always)] + fn flatten_idx(self: & Self, idx: & Index) -> usize { + // NOTE: This is a very hot code path. Should benchmark versus explicit loop. + idx.iter().zip(self.strides.iter()).fold(0, |sum, (i, s)| sum + i*s) + } + + fn bound_check_panic(self: & Self, idx: & Index) -> () { + for d in 0..self.dim() { + let i = *(unsafe { idx.get_unchecked(d) }); + if i >= self.shape[d] { + panic!("{}-dimensional tensor index is out of bounds in dimension {} ({} >= {}).", self.dim(), d, i, self.shape[d]) + } + } + } + + pub fn in_bounds(self: & Self, idx: & Index) -> bool { + for d in 0..self.dim() { + let i = *(unsafe { idx.get_unchecked(d) }); + if i >= self.shape[d] { + return false; + } + } + true + } + + pub fn dim(self: & Self) -> usize { D } + + pub fn shape(self: & Self) -> & Shape { &self.shape } + + pub fn el(self: & Self, idx: & Index) -> Option<& T> { + if self.in_bounds(idx) { Some(unsafe { self.el_unchecked(idx) }) } + else { None } + } + + pub unsafe fn el_unchecked(self: & Self, idx: & Index) -> & T { self.data.get_unchecked(self.flatten_idx(idx)) } + + pub unsafe fn el_unchecked_mut(self: &mut Self, idx: & Index) -> &mut T { + let flat_idx = self.flatten_idx(idx); + self.data.get_unchecked_mut(flat_idx) + } + 
+ pub fn flat_len(self: & Self) -> usize { self.data.len() } + pub fn size(self: & Self) -> usize { self.flat_len()*std::mem::size_of::() } + + pub fn fill_with(self: &mut Self, x: & [T]) -> () { + // Already panics on size mismatch. + self.data.copy_from_slice(x) + } + + pub fn fill(self: &mut Self, x: T) -> () { + self.data.fill(x); + } + + pub fn data(self: & Self) -> & [T] { &self.data } +} + +impl Tensor { + pub fn dirty_print(self: & Self) { + for i in 0..self.shape[0] { + for j in 0..self.shape[1] { + print!("{} ", self[[i, j]]); + } + println!(""); + } + } +} + +impl std::ops::Index> for Tensor { + type Output = T; + + fn index(self: & Self, idx: Index) -> & Self::Output { + self.bound_check_panic(&idx); + unsafe { self.el_unchecked(&idx) } + } +} + +impl std::ops::Index<& Index> for Tensor { + type Output = T; + + fn index(self: & Self, idx: & Index) -> & Self::Output { + self.bound_check_panic(idx); + unsafe { self.el_unchecked(idx) } + } +} + +impl std::ops::IndexMut> for Tensor { + fn index_mut(self: &mut Self, idx: Index) -> &mut Self::Output { + self.bound_check_panic(&idx); + unsafe { self.el_unchecked_mut(&idx) } + } +} + +impl std::ops::IndexMut<& Index> for Tensor { + fn index_mut(self: &mut Self, idx: & Index) -> &mut Self::Output { + self.bound_check_panic(idx); + unsafe { self.el_unchecked_mut(idx) } + } +} + +impl IntoIterator for Tensor { + type Item = T; + type IntoIter = std::vec::IntoIter; + + fn into_iter(self) -> Self::IntoIter { self.data.into_iter() } +} + + +// FIXME: Should have a proper IntoIter implementing IntoIter for &'a Tensor. 
+ +// Note: Tensor supports element indexing through its Index/IndexMut implementations; it does not support range slicing (no Index<Range<usize>> impl). diff --git a/11/test-2.txt b/11/test-2.txt new file mode 100644 index 0000000..ae21dd2 --- /dev/null +++ b/11/test-2.txt @@ -0,0 +1,5 @@ +11111 +19991 +19191 +19991 +11111 diff --git a/11/test.txt b/11/test.txt new file mode 100644 index 0000000..03743f6 --- /dev/null +++ b/11/test.txt @@ -0,0 +1,10 @@ +5483143223 +2745854711 +5264556173 +6141336146 +6357385478 +4167524645 +2176841721 +6882881134 +4846848554 +5283751526 -- cgit v1.2.3