diff options
Diffstat (limited to '08/src')
-rw-r--r-- | 08/src/part-1.rs | 91
-rw-r--r-- | 08/src/part-2.rs | 90
-rw-r--r-- | 08/src/tensor.rs | 171
3 files changed, 352 insertions, 0 deletions
diff --git a/08/src/part-1.rs b/08/src/part-1.rs new file mode 100644 index 0000000..1bbd75a --- /dev/null +++ b/08/src/part-1.rs @@ -0,0 +1,91 @@ +mod tensor; + +use std::io::{BufRead}; + +use tensor::{Index, Shape, Tensor}; + +fn main() { + let stdin = std::io::stdin(); + let mut handle = stdin.lock(); + + let mut tmp: Vec<(i8, bool)> = Vec::new(); + let mut m: usize = 0; + let mut n: usize = 0; + let mut n_check: usize = 0; + + loop { + let buf = handle.fill_buf().expect("IO error"); + let bytes_read = buf.len(); + if bytes_read == 0 { break; } + + for & b in buf.into_iter() { + if b >= b'0' && b <= b'9' { + tmp.push(((b - b'0').try_into().unwrap(), false)); + if m == 0 { + n += 1; + } + n_check += 1; + } + if b == b'\n' { + m += 1; + assert_eq!(n_check, n); + n_check = 0; + } + } + + handle.consume(bytes_read); + } + + let mut x: Tensor<(i8, bool), 2> = Tensor::new_from([m, n], tmp); + let shape = x.shape().clone(); + let mut tallest: i8; + + // Look north-south. + for j in 0..shape[1] { + // Look from north. + tallest = -1; + for i in 0..shape[0] { + let tree = &mut x[[i, j]]; + if tree.0 > tallest { + tree.1 = true; + tallest = tree.0; + } + } + + // Look from south. + tallest = -1; + for i in (0..shape[0]).rev() { + let tree = &mut x[[i, j]]; + if tree.0 > tallest { + tree.1 = true; + tallest = tree.0; + } + } + } + + // Look east-west. + for i in 0..shape[0] { + // Look from west. + tallest = -1; + for j in 0..shape[1] { + let tree = &mut x[[i, j]]; + if tree.0 > tallest { + tree.1 = true; + tallest = tree.0; + } + } + + // Look from east. 
+ tallest = -1; + for j in (0..shape[1]).rev() { + let tree = &mut x[[i, j]]; + if tree.0 > tallest { + tree.1 = true; + tallest = tree.0; + } + } + } + + let num_visible = x.data().into_iter().filter(|(_, vis)| *vis).count(); + println!("{}", num_visible); +} diff --git a/08/src/part-2.rs b/08/src/part-2.rs new file mode 100644 index 0000000..94bf4d5 --- /dev/null +++ b/08/src/part-2.rs @@ -0,0 +1,90 @@ +mod tensor; + +use std::cmp::{max}; +use std::io::{BufRead}; + +use tensor::{Index, Shape, Tensor}; + +fn main() { + let stdin = std::io::stdin(); + let mut handle = stdin.lock(); + + let mut tmp: Vec<usize> = Vec::new(); + let mut m: usize = 0; + let mut n: usize = 0; + let mut n_check: usize = 0; + + loop { + let buf = handle.fill_buf().expect("IO error"); + let bytes_read = buf.len(); + if bytes_read == 0 { break; } + + for & b in buf.into_iter() { + if b >= b'0' && b <= b'9' { + tmp.push((b - b'0').try_into().unwrap()); + if m == 0 { + n += 1; + } + n_check += 1; + } + if b == b'\n' { + m += 1; + assert_eq!(n_check, n); + n_check = 0; + } + } + + handle.consume(bytes_read); + } + + let mut x: Tensor<usize, 2> = Tensor::new_from([m, n], tmp); + let shape = x.shape().clone(); + + let mut highest_score: usize = 0; + + for i in 0..m { + for j in 0..n { + let h = x[[i, j]]; + + // Look north + let mut north_dist = 0; + for a in (0..i).rev() { + north_dist += 1; + if x[[a, j]] >= h { + break; + } + } + + // Look south. + let mut south_dist = 0; + for a in i+1..m { + south_dist += 1; + if x[[a, j]] >= h { + break; + } + } + + // Look east. + let mut east_dist = 0; + for a in j+1..n { + east_dist += 1; + if x[[i, a]] >= h { + break; + } + } + + // Look west. 
#![allow(dead_code)]

/// The extent of each of a tensor's `D` dimensions.
pub type Shape<const D: usize> = [usize; D];
/// A `D`-dimensional element index; structurally identical to a shape.
pub type Index<const D: usize> = Shape<D>;

/// A dense, row-major (C-order) `D`-dimensional array of `T`.
pub struct Tensor<T, const D: usize> {
    data: Vec<T>,      // flat element storage, row-major
    shape: Shape<D>,   // extent of each dimension
    strides: Shape<D>, // flat-offset step per dimension
}

pub type Tensor1<T> = Tensor<T, 1>;
pub type Tensor2<T> = Tensor<T, 2>;
pub type Tensor3<T> = Tensor<T, 3>;
pub type Tensor4<T> = Tensor<T, 4>;
pub type Index1 = Index<1>;
pub type Index2 = Index<2>;
pub type Index3 = Index<3>;
pub type Index4 = Index<4>;


impl<T: Copy, const D: usize> Tensor<T, D> {
    /// Computes the row-major strides and total element count for `shape`.
    ///
    /// Shared by both constructors so the layout logic lives in one place
    /// (it used to be duplicated verbatim in `new` and `new_from`).
    /// Panics on a zero-dimensional shape or any zero-length dimension.
    fn layout(shape: &Shape<D>) -> (Shape<D>, usize) {
        if D == 0 { panic!("Empty shape not allowed."); }

        let mut strides: Shape<D> = [0; D];
        let mut len = shape[D - 1];
        strides[D - 1] = 1;
        for d in (1..D).rev() { // d = D-1, …, 1.
            strides[d - 1] = shape[d] * strides[d];
            len *= shape[d - 1];
        }
        if len == 0 { panic!("Empty dimensions not allowed."); }

        (strides, len)
    }

    /// Creates a tensor of the given shape with every element set to `x`.
    pub fn new(shape: Shape<D>, x: T) -> Self {
        let (strides, len) = Self::layout(&shape);

        Self {
            data: vec![x; len],
            shape,
            strides,
        }
    }

    /// Creates a tensor of the given shape backed by `x` (row-major order).
    /// Panics if `x.len()` does not equal the number of entries implied by
    /// `shape`.
    pub fn new_from(shape: Shape<D>, x: Vec<T>) -> Self {
        let (strides, len) = Self::layout(&shape);

        if len != x.len() { panic!("Vector of length {} cannot fill tensor with {} entries.", x.len(), len); }

        Self {
            data: x,
            shape,
            strides,
        }
    }

    /// Maps a `D`-dimensional index to its offset into the flat buffer.
    #[inline(always)]
    fn flatten_idx(&self, idx: &Index<D>) -> usize {
        // NOTE: This is a very hot code path. Should benchmark versus explicit loop.
        idx.iter().zip(self.strides.iter()).fold(0, |sum, (i, s)| sum + i * s)
    }

    /// Panics with a descriptive message if `idx` is out of bounds in any
    /// dimension. Safe iteration replaces the previous `get_unchecked` use:
    /// `idx` is a fixed-size array, so there are no bounds checks to avoid.
    fn bound_check_panic(&self, idx: &Index<D>) {
        for (d, (&i, &extent)) in idx.iter().zip(self.shape.iter()).enumerate() {
            if i >= extent {
                panic!("{}-dimensional tensor index is out of bounds in dimension {} ({} >= {}).", D, d, i, extent)
            }
        }
    }

    /// Returns whether `idx` addresses an element inside the tensor.
    pub fn in_bounds(&self, idx: &Index<D>) -> bool {
        idx.iter().zip(self.shape.iter()).all(|(i, extent)| i < extent)
    }

    /// The tensor's shape.
    pub fn shape(&self) -> &Shape<D> { &self.shape }

    /// Bounds-checked element access; `None` when `idx` is out of bounds.
    pub fn el(&self, idx: &Index<D>) -> Option<&T> {
        if self.in_bounds(idx) {
            // SAFETY: idx was just verified to be in bounds in every dimension.
            Some(unsafe { self.el_unchecked(idx) })
        } else {
            None
        }
    }

    /// Element access without bounds checking.
    ///
    /// # Safety
    /// `idx` must satisfy `self.in_bounds(idx)`; otherwise the computed flat
    /// offset may read past the end of the buffer.
    pub unsafe fn el_unchecked(&self, idx: &Index<D>) -> &T {
        self.data.get_unchecked(self.flatten_idx(idx))
    }

    /// Mutable element access without bounds checking.
    ///
    /// # Safety
    /// `idx` must satisfy `self.in_bounds(idx)`; otherwise the computed flat
    /// offset may write past the end of the buffer.
    pub unsafe fn el_unchecked_mut(&mut self, idx: &Index<D>) -> &mut T {
        let flat_idx = self.flatten_idx(idx);
        self.data.get_unchecked_mut(flat_idx)
    }

    /// Total number of elements.
    pub fn flat_len(&self) -> usize { self.data.len() }
    /// Total size of the element storage in bytes.
    pub fn size(&self) -> usize { self.flat_len() * std::mem::size_of::<T>() }

    /// Overwrites all elements from `x` (row-major order).
    /// `copy_from_slice` already panics on a length mismatch.
    pub fn fill_with(&mut self, x: &[T]) {
        self.data.copy_from_slice(x)
    }

    /// Sets every element to `x`.
    pub fn fill(&mut self, x: T) {
        self.data.fill(x);
    }

    /// The flat, row-major element storage.
    pub fn data(&self) -> &[T] { &self.data }
}

impl<T: Copy + std::fmt::Display> Tensor<T, 2> {
    /// Debug helper: prints a 2-D tensor row by row to stdout.
    pub fn dirty_print(&self) {
        for i in 0..self.shape[0] {
            for j in 0..self.shape[1] {
                print!("{} ", self[[i, j]]);
            }
            println!();
        }
    }
}

impl<T: Copy, const D: usize> std::ops::Index<Index<D>> for Tensor<T, D> {
    type Output = T;

    /// `t[[i, j, …]]`; panics when the index is out of bounds.
    fn index(&self, idx: Index<D>) -> &Self::Output {
        self.bound_check_panic(&idx);
        // SAFETY: bound_check_panic panics on any out-of-bounds index.
        unsafe { self.el_unchecked(&idx) }
    }
}

impl<T: Copy, const D: usize> std::ops::Index<&Index<D>> for Tensor<T, D> {
    type Output = T;

    /// `t[&idx]`; panics when the index is out of bounds.
    fn index(&self, idx: &Index<D>) -> &Self::Output {
        self.bound_check_panic(idx);
        // SAFETY: bound_check_panic panics on any out-of-bounds index.
        unsafe { self.el_unchecked(idx) }
    }
}

impl<T: Copy, const D: usize> std::ops::IndexMut<Index<D>> for Tensor<T, D> {
    /// `t[[i, j, …]] = v`; panics when the index is out of bounds.
    fn index_mut(&mut self, idx: Index<D>) -> &mut Self::Output {
        self.bound_check_panic(&idx);
        // SAFETY: bound_check_panic panics on any out-of-bounds index.
        unsafe { self.el_unchecked_mut(&idx) }
    }
}

impl<T: Copy, const D: usize> std::ops::IndexMut<&Index<D>> for Tensor<T, D> {
    /// `t[&idx] = v`; panics when the index is out of bounds.
    fn index_mut(&mut self, idx: &Index<D>) -> &mut Self::Output {
        self.bound_check_panic(idx);
        // SAFETY: bound_check_panic panics on any out-of-bounds index.
        unsafe { self.el_unchecked_mut(idx) }
    }
}

impl<T: Copy, const D: usize> IntoIterator for Tensor<T, D> {
    type Item = T;
    type IntoIter = std::vec::IntoIter<Self::Item>;

    /// Consumes the tensor, yielding its elements in row-major order.
    fn into_iter(self) -> Self::IntoIter { self.data.into_iter() }
}


// FIXME: Should have a proper IntoIter implementing IntoIter for &'a Tensor<T, D>.

// Note: Tensor is also sliceable (due to the Index implementations)