diff --git a/include/dav1d/picture.rs b/include/dav1d/picture.rs index 0acc9b448..13914d09f 100644 --- a/include/dav1d/picture.rs +++ b/include/dav1d/picture.rs @@ -1,7 +1,6 @@ use crate::include::common::validate::validate_input; use crate::include::dav1d::common::Dav1dDataProps; use crate::include::dav1d::common::Rav1dDataProps; -use crate::include::dav1d::dav1d::Dav1dRef; use crate::include::dav1d::headers::DRav1d; use crate::include::dav1d::headers::Dav1dFrameHeader; use crate::include::dav1d::headers::Dav1dITUTT35; @@ -17,12 +16,10 @@ use crate::src::c_arc::RawArc; use crate::src::error::Dav1dResult; use crate::src::error::Rav1dError; use crate::src::error::Rav1dError::EINVAL; -use crate::src::r#ref::Rav1dRef; use libc::ptrdiff_t; use libc::uintptr_t; use std::ffi::c_int; use std::ffi::c_void; -use std::ptr; use std::ptr::NonNull; use std::sync::Arc; @@ -92,22 +89,24 @@ pub struct Dav1dPicture { pub mastering_display_ref: Option>, // opaque, so we can change this pub itut_t35_ref: Option, Box<[Dav1dITUTT35]>>>>, // opaque, so we can change this pub reserved_ref: [uintptr_t; 4], - pub r#ref: Option>, + pub r#ref: Option>, // opaque, so we can change this pub allocator_data: Option>, } -#[derive(Clone)] -pub(crate) struct Rav1dPictureData { - pub data: [*mut c_void; 3], - pub allocator_data: Option>, +pub struct Rav1dPictureData { + pub(crate) data: [*mut c_void; 3], + pub(crate) allocator_data: Option>, + pub(crate) allocator: Rav1dPicAllocator, } -impl Default for Rav1dPictureData { - fn default() -> Self { - Self { - data: [ptr::null_mut(); 3], - allocator_data: Default::default(), - } +impl Drop for Rav1dPictureData { + fn drop(&mut self) { + let Self { + data, + allocator_data, + ref allocator, + } = *self; + allocator.dealloc_picture_data(data, allocator_data); } } @@ -121,14 +120,13 @@ impl Default for Rav1dPictureData { pub(crate) struct Rav1dPicture { pub seq_hdr: Option>>, pub frame_hdr: Option>>, - pub data: Rav1dPictureData, + pub data: Option>, pub stride: [ptrdiff_t; 2], pub p: Rav1dPictureParameters, pub m: Rav1dDataProps, pub content_light: Option>, pub mastering_display: Option>, pub itut_t35: Arc, Box<[Dav1dITUTT35]>>>, - pub r#ref: Option>, } impl From for Rav1dPicture { @@ -136,7 +134,7 @@ impl From for Rav1dPicture { let Dav1dPicture { seq_hdr: _, frame_hdr: _, - data, + data: _, stride, p, m, @@ -151,8 +149,8 @@ impl From for Rav1dPicture { mastering_display_ref, itut_t35_ref, reserved_ref: _, - r#ref, - allocator_data, + r#ref: data_ref, + allocator_data: _, } = value; Self { // We don't `.update_rav1d()` [`Rav1dSequenceHeader`] because it's meant to be read-only. @@ -161,10 +159,8 @@ impl From for Rav1dPicture { // We don't `.update_rav1d()` [`Rav1dFrameHeader`] because it's meant to be read-only. // Safety: `raw` came from [`RawArc::from_arc`]. frame_hdr: frame_hdr_ref.map(|raw| unsafe { raw.into_arc() }), - data: Rav1dPictureData { - data: data.map(|data| data.map_or_else(ptr::null_mut, NonNull::as_ptr)), - allocator_data, - }, + // Safety: `raw` came from [`RawArc::from_arc`]. 
+ data: data_ref.map(|raw| unsafe { raw.into_arc() }), stride, p: p.into(), m: m.into(), @@ -177,7 +173,6 @@ impl From for Rav1dPicture { itut_t35: itut_t35_ref .map(|raw| unsafe { raw.into_arc() }) .unwrap_or_default(), - r#ref, } } } @@ -187,25 +182,23 @@ impl From for Dav1dPicture { let Rav1dPicture { seq_hdr, frame_hdr, - data: - Rav1dPictureData { - data, - allocator_data, - }, + data, stride, p, m, content_light, mastering_display, itut_t35, - r#ref, } = value; Self { // [`DRav1d::from_rav1d`] is called right after [`parse_seq_hdr`]. seq_hdr: seq_hdr.as_ref().map(|arc| (&arc.as_ref().dav1d).into()), // [`DRav1d::from_rav1d`] is called in [`parse_frame_hdr`]. frame_hdr: frame_hdr.as_ref().map(|arc| (&arc.as_ref().dav1d).into()), - data: data.map(NonNull::new), + data: data + .as_ref() + .map(|arc| arc.data.map(NonNull::new)) + .unwrap_or_default(), stride, p: p.into(), m: m.into(), @@ -221,8 +214,9 @@ impl From for Dav1dPicture { mastering_display_ref: mastering_display.map(RawArc::from_arc), itut_t35_ref: Some(itut_t35).map(RawArc::from_arc), reserved_ref: Default::default(), - r#ref, - allocator_data, + // Order flipped so that the borrow comes before the move. + allocator_data: data.as_ref().and_then(|arc| arc.allocator_data), + r#ref: data.map(RawArc::from_arc), } } } diff --git a/src/decode.rs b/src/decode.rs index 283574805..0f6b8a0c8 100644 --- a/src/decode.rs +++ b/src/decode.rs @@ -4564,7 +4564,7 @@ pub(crate) unsafe fn rav1d_decode_frame_exit( f: &mut Rav1dFrameData, retval: Rav1dResult, ) { - if !f.sr_cur.p.data.data[0].is_null() { + if f.sr_cur.p.data.is_some() { f.task_thread.error = AtomicI32::new(0); } let cf = f.frame_thread.cf.get_mut(); @@ -4669,8 +4669,7 @@ pub unsafe fn rav1d_submit_frame(c: &mut Rav1dContext) -> Rav1dResult { task_thread_lock = f.task_thread.cond.wait(task_thread_lock).unwrap(); } let out_delayed = &mut c.frame_thread.out_delayed[next as usize]; - if !out_delayed.p.data.data[0].is_null() || f.task_thread.error.load(Ordering::SeqCst) != 0 - { + if out_delayed.p.data.is_some() || f.task_thread.error.load(Ordering::SeqCst) != 0 { let first = c.task_thread.first.load(Ordering::SeqCst); if first + 1 < c.n_fc { c.task_thread.first.fetch_add(1, Ordering::SeqCst); @@ -4695,7 +4694,7 @@ pub unsafe fn rav1d_submit_frame(c: &mut Rav1dContext) -> Rav1dResult { c.cached_error = mem::replace(&mut error, Ok(())); *c.cached_error_props.get_mut().unwrap() = out_delayed.p.m.clone(); rav1d_thread_picture_unref(out_delayed); - } else if !out_delayed.p.data.data[0].is_null() { + } else if out_delayed.p.data.is_some() { let progress = out_delayed.progress.as_ref().unwrap()[1].load(Ordering::Relaxed); if (out_delayed.visible || c.output_invisible_frames) && progress != FRAME_ERROR { rav1d_thread_picture_ref(&mut c.out, out_delayed); @@ -4756,14 +4755,14 @@ pub unsafe fn rav1d_submit_frame(c: &mut Rav1dContext) -> Rav1dResult { if frame_hdr.frame_type.is_inter_or_switch() { if frame_hdr.primary_ref_frame != RAV1D_PRIMARY_REF_NONE { let pri_ref = frame_hdr.refidx[frame_hdr.primary_ref_frame as usize] as usize; - if c.refs[pri_ref].p.p.data.data[0].is_null() { + if c.refs[pri_ref].p.p.data.is_none() { on_error(f, c, out); return Err(EINVAL); } } for i in 0..7 { let refidx = frame_hdr.refidx[i] as usize; - if c.refs[refidx].p.p.data.data[0].is_null() + if c.refs[refidx].p.p.data.is_none() || (frame_hdr.size.width[0] * 2) < c.refs[refidx].p.p.p.w || (frame_hdr.size.height * 2) < c.refs[refidx].p.p.p.h || frame_hdr.size.width[0] > c.refs[refidx].p.p.p.w * 16 diff --git 
a/src/fg_apply.rs b/src/fg_apply.rs index 3976d0a29..56796268d 100644 --- a/src/fg_apply.rs +++ b/src/fg_apply.rs @@ -119,16 +119,20 @@ pub(crate) unsafe fn rav1d_prep_grain( let sz = out.p.h as isize * stride; if sz < 0 { memcpy( - (out.data.data[0] as *mut u8) + (out.data.as_ref().unwrap().data[0] as *mut u8) .offset(sz as isize) .offset(-(stride as isize)) as *mut c_void, - (r#in.data.data[0] as *mut u8) + (r#in.data.as_ref().unwrap().data[0] as *mut u8) .offset(sz as isize) .offset(-(stride as isize)) as *const c_void, -sz as usize, ); } else { - memcpy(out.data.data[0], r#in.data.data[0], sz as usize); + memcpy( + out.data.as_ref().unwrap().data[0], + r#in.data.as_ref().unwrap().data[0], + sz as usize, + ); } } @@ -140,10 +144,10 @@ pub(crate) unsafe fn rav1d_prep_grain( if sz < 0 { if data.num_uv_points[0] == 0 { memcpy( - (out.data.data[1] as *mut u8) + (out.data.as_ref().unwrap().data[1] as *mut u8) .offset(sz as isize) .offset(-(stride as isize)) as *mut c_void, - (r#in.data.data[1] as *mut u8) + (r#in.data.as_ref().unwrap().data[1] as *mut u8) .offset(sz as isize) .offset(-(stride as isize)) as *const c_void, -sz as usize, @@ -151,10 +155,10 @@ pub(crate) unsafe fn rav1d_prep_grain( } if data.num_uv_points[1] == 0 { memcpy( - (out.data.data[2] as *mut u8) + (out.data.as_ref().unwrap().data[2] as *mut u8) .offset(sz as isize) .offset(-(stride as isize)) as *mut c_void, - (r#in.data.data[2] as *mut u8) + (r#in.data.as_ref().unwrap().data[2] as *mut u8) .offset(sz as isize) .offset(-(stride as isize)) as *const c_void, -sz as usize, @@ -162,10 +166,18 @@ pub(crate) unsafe fn rav1d_prep_grain( } } else { if data.num_uv_points[0] == 0 { - memcpy(out.data.data[1], r#in.data.data[1], sz as usize); + memcpy( + out.data.as_ref().unwrap().data[1], + r#in.data.as_ref().unwrap().data[1], + sz as usize, + ); } if data.num_uv_points[1] == 0 { - memcpy(out.data.data[2], r#in.data.data[2], sz as usize); + memcpy( + out.data.as_ref().unwrap().data[2], + r#in.data.as_ref().unwrap().data[2], + sz as usize, + ); } } } @@ -188,7 +200,7 @@ pub(crate) unsafe fn rav1d_apply_grain_row( let ss_x = (r#in.p.layout != Rav1dPixelLayout::I444) as usize; let cpw = out.p.w as usize + ss_x >> ss_x; let is_id = seq_hdr.mtrx == Rav1dMatrixCoefficients::IDENTITY; - let luma_src = (r#in.data.data[0] as *mut BD::Pixel) + let luma_src = (r#in.data.as_ref().unwrap().data[0] as *mut BD::Pixel) .offset(((row * 32) as isize * BD::pxstride(r#in.stride[0])) as isize); let bitdepth_max = (1 << out.p.bpc) - 1; let bd = BD::from_c(bitdepth_max); @@ -196,7 +208,7 @@ pub(crate) unsafe fn rav1d_apply_grain_row( if data.num_y_points != 0 { let bh = cmp::min(out.p.h as usize - row * 32, 32); dsp.fgy_32x32xn.call( - (out.data.data[0] as *mut BD::Pixel) + (out.data.as_ref().unwrap().data[0] as *mut BD::Pixel) .offset(((row * 32) as isize * BD::pxstride(out.stride[0])) as isize), luma_src.cast(), out.stride[0], @@ -229,8 +241,9 @@ pub(crate) unsafe fn rav1d_apply_grain_row( if data.chroma_scaling_from_luma { for pl in 0..2 { dsp.fguv_32x32xn[r#in.p.layout.try_into().unwrap()].call( - (out.data.data[1 + pl] as *mut BD::Pixel).offset(uv_off as isize), - (r#in.data.data[1 + pl] as *const BD::Pixel).offset(uv_off as isize), + (out.data.as_ref().unwrap().data[1 + pl] as *mut BD::Pixel).offset(uv_off as isize), + (r#in.data.as_ref().unwrap().data[1 + pl] as *const BD::Pixel) + .offset(uv_off as isize), r#in.stride[1], data, cpw, @@ -249,8 +262,10 @@ pub(crate) unsafe fn rav1d_apply_grain_row( for pl in 0..2 { if data.num_uv_points[pl] != 0 { 
dsp.fguv_32x32xn[r#in.p.layout.try_into().unwrap()].call( - (out.data.data[1 + pl] as *mut BD::Pixel).offset(uv_off as isize), - (r#in.data.data[1 + pl] as *const BD::Pixel).offset(uv_off as isize), + (out.data.as_ref().unwrap().data[1 + pl] as *mut BD::Pixel) + .offset(uv_off as isize), + (r#in.data.as_ref().unwrap().data[1 + pl] as *const BD::Pixel) + .offset(uv_off as isize), r#in.stride[1], data_c, cpw, diff --git a/src/lib.rs b/src/lib.rs index 89350ebde..4253da7fa 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -399,7 +399,7 @@ unsafe fn output_image(c: &mut Rav1dContext, out: &mut Rav1dPicture) -> Rav1dRes } rav1d_thread_picture_unref(&mut *r#in); - if !c.all_layers && c.max_spatial_id && !(c.out.p.data.data[0]).is_null() { + if !c.all_layers && c.max_spatial_id && c.out.p.data.is_some() { rav1d_thread_picture_move_ref(r#in, &mut c.out); } res @@ -410,7 +410,7 @@ unsafe fn output_picture_ready(c: &mut Rav1dContext, drain: bool) -> bool { return true; } if !c.all_layers && c.max_spatial_id { - if !c.out.p.data.data[0].is_null() && !c.cache.p.data.data[0].is_null() { + if c.out.p.data.is_some() && c.cache.p.data.is_some() { if c.max_spatial_id == (c.cache.p.frame_hdr.as_ref().unwrap().spatial_id != 0) || c.out.flags.contains(PictureFlags::NEW_TEMPORAL_UNIT) { @@ -420,17 +420,17 @@ unsafe fn output_picture_ready(c: &mut Rav1dContext, drain: bool) -> bool { rav1d_thread_picture_move_ref(&mut c.cache, &mut c.out); return false; } else { - if !c.cache.p.data.data[0].is_null() && drain { + if c.cache.p.data.is_some() && drain { return true; } else { - if !c.out.p.data.data[0].is_null() { + if c.out.p.data.is_some() { rav1d_thread_picture_move_ref(&mut c.cache, &mut c.out); return false; } } } } - !c.out.p.data.data[0].is_null() + c.out.p.data.is_some() } unsafe fn drain_picture(c: &mut Rav1dContext, out: &mut Rav1dPicture) -> Rav1dResult { @@ -444,8 +444,7 @@ unsafe fn drain_picture(c: &mut Rav1dContext, out: &mut Rav1dPicture) -> Rav1dRe task_thread_lock = f.task_thread.cond.wait(task_thread_lock).unwrap(); } let out_delayed = &mut c.frame_thread.out_delayed[next as usize]; - if !out_delayed.p.data.data[0].is_null() || f.task_thread.error.load(Ordering::SeqCst) != 0 - { + if out_delayed.p.data.is_some() || f.task_thread.error.load(Ordering::SeqCst) != 0 { let first: c_uint = c.task_thread.first.load(Ordering::SeqCst); if first.wrapping_add(1 as c_uint) < c.n_fc { c.task_thread.first.fetch_add(1, Ordering::SeqCst); @@ -478,7 +477,7 @@ unsafe fn drain_picture(c: &mut Rav1dContext, out: &mut Rav1dPicture) -> Rav1dRe rav1d_thread_picture_unref(out_delayed); return error; } - if !(out_delayed.p.data.data[0]).is_null() { + if out_delayed.p.data.is_some() { let progress = out_delayed.progress.as_ref().unwrap()[1].load(Ordering::Relaxed); if (out_delayed.visible || c.output_invisible_frames) && progress != FRAME_ERROR { rav1d_thread_picture_ref(&mut c.out, out_delayed); diff --git a/src/obu.rs b/src/obu.rs index b4c3bf55d..8e22c5462 100644 --- a/src/obu.rs +++ b/src/obu.rs @@ -2512,7 +2512,12 @@ unsafe fn parse_obus( } _ => {} } - if c.refs[frame_hdr.existing_frame_idx as usize].p.p.data.data[0].is_null() { + if c.refs[frame_hdr.existing_frame_idx as usize] + .p + .p + .data + .is_none() + { return Err(EINVAL); } if c.strict_std_compliance && !c.refs[frame_hdr.existing_frame_idx as usize].p.showable @@ -2547,9 +2552,7 @@ unsafe fn parse_obus( task_thread_lock = f.task_thread.cond.wait(task_thread_lock).unwrap(); } let out_delayed = &mut c.frame_thread.out_delayed[next as usize]; - if 
!out_delayed.p.data.data[0].is_null() - || f.task_thread.error.load(Ordering::SeqCst) != 0 - { + if out_delayed.p.data.is_some() || f.task_thread.error.load(Ordering::SeqCst) != 0 { let first = c.task_thread.first.load(Ordering::SeqCst); if first + 1 < c.n_fc { c.task_thread.first.fetch_add(1, Ordering::SeqCst); @@ -2573,7 +2576,7 @@ unsafe fn parse_obus( c.cached_error = mem::replace(&mut *error, Ok(())); *c.cached_error_props.get_mut().unwrap() = out_delayed.p.m.clone(); rav1d_thread_picture_unref(out_delayed); - } else if !(out_delayed.p.data.data[0]).is_null() { + } else if out_delayed.p.data.is_some() { let progress = out_delayed.progress.as_ref().unwrap()[1].load(Ordering::Relaxed); if (out_delayed.visible || c.output_invisible_frames) && progress != FRAME_ERROR diff --git a/src/picture.rs b/src/picture.rs index 36d56007a..3a0dd944d 100644 --- a/src/picture.rs +++ b/src/picture.rs @@ -1,4 +1,3 @@ -use crate::include::common::validate::validate_input; use crate::include::dav1d::common::Rav1dDataProps; use crate::include::dav1d::dav1d::Rav1dEventFlags; use crate::include::dav1d::headers::DRav1d; @@ -29,9 +28,6 @@ use crate::src::mem::rav1d_mem_pool_pop; use crate::src::mem::rav1d_mem_pool_push; use crate::src::mem::Rav1dMemPool; use crate::src::mem::Rav1dMemPoolBuffer; -use crate::src::r#ref::rav1d_ref_dec; -use crate::src::r#ref::rav1d_ref_inc; -use crate::src::r#ref::rav1d_ref_wrap; use atomig::Atom; use atomig::AtomLogic; use bitflags::bitflags; @@ -83,18 +79,12 @@ pub(crate) struct Rav1dThreadPicture { pub progress: Option>, } -#[repr(C)] -pub(crate) struct pic_ctx_context { - pub allocator: Rav1dPicAllocator, - pub pic: Rav1dPicture, -} - pub unsafe extern "C" fn dav1d_default_picture_alloc( p_c: *mut Dav1dPicture, cookie: *mut c_void, ) -> Dav1dResult { assert!(::core::mem::size_of::() <= RAV1D_PICTURE_ALIGNMENT); - let mut p = p_c.read().to::(); + let p = p_c.read().to::(); let hbd = (p.p.bpc > 8) as c_int; let aligned_w = p.p.w + 127 & !127; let aligned_h = p.p.h + 127 & !127; @@ -109,8 +99,7 @@ pub unsafe extern "C" fn dav1d_default_picture_alloc( if uv_stride & 1023 == 0 && has_chroma { uv_stride += RAV1D_PICTURE_ALIGNMENT as isize; } - p.stride[0] = y_stride; - p.stride[1] = uv_stride; + let stride = [y_stride, uv_stride]; let y_sz = (y_stride * aligned_h as isize) as usize; let uv_sz = (uv_stride * (aligned_h >> ss_ver) as isize) as usize; let pic_size = y_sz + 2 * uv_sz; @@ -129,11 +118,14 @@ pub unsafe extern "C" fn dav1d_default_picture_alloc( // were previously null instead of an empty slice when `!has_chroma`, // but this way is simpler and more uniform, especially when we move to slices. let data = [data0, data1, data2].map(|data| data.as_mut_ptr().cast()); - p.data = Rav1dPictureData { - data, - allocator_data: NonNull::new(buf.cast()), - }; - p_c.write(p.into()); + + (*p_c).stride = stride; + (*p_c).data = data.map(NonNull::new); + (*p_c).allocator_data = NonNull::new(buf.cast()); + // The caller will create the real `Rav1dPicture` from the `Dav1dPicture` fields set above, + // so we don't want to drop the `Rav1dPicture` we created for convenience here. 
+ mem::forget(p); + Rav1dResult::Ok(()).into() } @@ -155,15 +147,6 @@ impl Default for Rav1dPicAllocator { } } -unsafe extern "C" fn free_buffer(_data: *const u8, user_data: *mut c_void) { - let pic_ctx: *mut pic_ctx_context = user_data as *mut pic_ctx_context; - let pic_ctx = Box::from_raw(pic_ctx); - let data = &pic_ctx.pic.data; - pic_ctx - .allocator - .dealloc_picture_data(data.data, data.allocator_data); -} - impl Rav1dPicAllocator { pub fn alloc_picture_data( &self, @@ -187,22 +170,18 @@ impl Rav1dPicAllocator { // Safety: `pic_c` is a valid `Dav1dPicture` with `data`, `stride`, `allocator_data` unset. let result = unsafe { (self.alloc_picture_callback)(&mut pic_c, self.cookie) }; result.try_to::().unwrap()?; + // `data`, `stride`, and `allocator_data` are the only fields set by the allocator. + // Of those, only `data` and `allocator_data` are read through `r#ref`, + // so we need to read those directly first and allocate the `Arc`. + let data = pic_c.data; + let allocator_data = pic_c.allocator_data; let mut pic = pic_c.to::(); - - let pic_ctx = Box::new(pic_ctx_context { + // TODO fallible allocation + pic.data = Some(Arc::new(Rav1dPictureData { + data: data.map(|data| data.unwrap().as_ptr()), + allocator_data, allocator: self.clone(), - pic: pic.clone(), - }); - // Safety: TODO(kkysen) Will be replaced by an `Arc` shortly. - pic.r#ref = NonNull::new(unsafe { - rav1d_ref_wrap( - pic.data.data[0] as *const u8, - Some(free_buffer), - Box::into_raw(pic_ctx).cast(), - ) - }); - assert!(pic.r#ref.is_some()); // TODO(kkysen) Will be removed soon anyways. - + })); Ok(pic) } @@ -235,7 +214,7 @@ unsafe fn picture_alloc_with_edges( bpc: c_int, p_allocator: &Rav1dPicAllocator, ) -> Rav1dResult { - if !p.data.data[0].is_null() { + if p.data.is_some() { writeln!(logger, "Picture already allocated!",); return Err(EGeneric); } @@ -311,8 +290,6 @@ pub(crate) unsafe fn rav1d_picture_alloc_copy( w: c_int, src: &Rav1dPicture, ) -> Rav1dResult { - let pic_ctx: *mut pic_ctx_context = - (*src).r#ref.unwrap().as_mut().user_data as *mut pic_ctx_context; picture_alloc_with_edges( &c.logger, dst, @@ -321,7 +298,7 @@ pub(crate) unsafe fn rav1d_picture_alloc_copy( src.seq_hdr.clone(), src.frame_hdr.clone(), src.p.bpc, - &mut (*pic_ctx).allocator, + &src.data.as_ref().unwrap().allocator, )?; rav1d_picture_copy_props( @@ -335,27 +312,10 @@ pub(crate) unsafe fn rav1d_picture_alloc_copy( } pub(crate) unsafe fn rav1d_picture_ref(dst: &mut Rav1dPicture, src: &Rav1dPicture) { - if validate_input!(dst.data.data[0].is_null()).is_err() { - return; - } - if let Some(r#ref) = src.r#ref { - if validate_input!(!src.data.data[0].is_null()).is_err() { - return; - } - rav1d_ref_inc(r#ref.as_ptr()); - } *dst = src.clone(); } pub(crate) unsafe fn rav1d_picture_move_ref(dst: &mut Rav1dPicture, src: &mut Rav1dPicture) { - if validate_input!(dst.data.data[0].is_null()).is_err() { - return; - } - if src.r#ref.is_some() { - if validate_input!(!src.data.data[0].is_null()).is_err() { - return; - } - } *dst = mem::take(src); } @@ -378,13 +338,7 @@ pub(crate) unsafe fn rav1d_thread_picture_move_ref( } pub(crate) unsafe fn rav1d_picture_unref_internal(p: &mut Rav1dPicture) { - let Rav1dPicture { data, r#ref, .. 
} = mem::take(p); - if let Some(r#ref) = r#ref { - if validate_input!(!data.data[0].is_null()).is_err() { - return; - } - rav1d_ref_dec(&mut r#ref.as_ptr()); - } + let _ = mem::take(p); } pub(crate) unsafe fn rav1d_thread_picture_unref(p: *mut Rav1dThreadPicture) { diff --git a/src/recon.rs b/src/recon.rs index cac0cf67d..10e15c5ea 100644 --- a/src/recon.rs +++ b/src/recon.rs @@ -2148,7 +2148,7 @@ unsafe fn mc( let dy = by * v_mul + (mvy >> 3 + ss_ver); let w; let h; - if refp.p.data.data[0] != f.cur.data.data[0] { + if refp.p.data.as_ref().unwrap().data[0] != f.cur.data.as_ref().unwrap().data[0] { w = f.cur.p.w + ss_hor >> ss_hor; h = f.cur.p.h + ss_ver >> ss_ver; } else { @@ -2170,7 +2170,7 @@ unsafe fn mc( (dy - (my != 0) as c_int * 3) as intptr_t, emu_edge_buf.as_mut_ptr().cast(), 192 * ::core::mem::size_of::() as isize, - refp.p.data.data[pl as usize].cast(), + refp.p.data.as_ref().unwrap().data[pl as usize].cast(), ref_stride, ); r#ref = emu_edge_buf @@ -2178,7 +2178,7 @@ unsafe fn mc( .add((192 * (my != 0) as c_int * 3 + (mx != 0) as c_int * 3) as usize); ref_stride = 192 * ::core::mem::size_of::() as isize; } else { - r#ref = (refp.p.data.data[pl as usize] as *mut BD::Pixel) + r#ref = (refp.p.data.as_ref().unwrap().data[pl as usize] as *mut BD::Pixel) .offset(BD::pxstride(ref_stride) * dy as isize) .offset(dx as isize); } @@ -2249,7 +2249,7 @@ unsafe fn mc( (top - 3) as intptr_t, emu_edge_buf.as_mut_ptr().cast(), 320 * ::core::mem::size_of::() as isize, - refp.p.data.data[pl as usize].cast(), + refp.p.data.as_ref().unwrap().data[pl as usize].cast(), ref_stride, ); r#ref = emu_edge_buf.as_mut_ptr().add((320 * 3 + 3) as usize); @@ -2258,7 +2258,7 @@ unsafe fn mc( println!("Emu"); } } else { - r#ref = (refp.p.data.data[pl as usize] as *mut BD::Pixel) + r#ref = (refp.p.data.as_ref().unwrap().data[pl as usize] as *mut BD::Pixel) .offset(BD::pxstride(ref_stride) * top as isize) .offset(left as isize); } @@ -2448,13 +2448,13 @@ unsafe fn warp_affine( (dy - 3) as intptr_t, emu_edge_buf.as_mut_ptr().cast(), 32 * ::core::mem::size_of::() as isize, - refp.p.data.data[pl as usize].cast(), + refp.p.data.as_ref().unwrap().data[pl as usize].cast(), ref_stride, ); ref_ptr = emu_edge_buf.as_ptr().add(32 * 3 + 3); ref_stride = 32 * ::core::mem::size_of::() as isize; } else { - ref_ptr = (refp.p.data.data[pl as usize] as *const BD::Pixel) + ref_ptr = (refp.p.data.as_ref().unwrap().data[pl as usize] as *const BD::Pixel) .offset((BD::pxstride(ref_stride) * dy as isize) as isize) .offset(dx as isize); } @@ -2534,10 +2534,11 @@ pub(crate) unsafe fn rav1d_recon_b_intra( let mut init_x = 0; while init_x < w4 { if b.c2rust_unnamed.c2rust_unnamed.pal_sz[0] != 0 { - let dst: *mut BD::Pixel = (f.cur.data.data[0] as *mut BD::Pixel).offset( - (4 * (t.b.y as isize * BD::pxstride(f.cur.stride[0]) + t.b.x as isize)) - as isize, - ); + let dst: *mut BD::Pixel = (f.cur.data.as_ref().unwrap().data[0] as *mut BD::Pixel) + .offset( + (4 * (t.b.y as isize * BD::pxstride(f.cur.stride[0]) + t.b.x as isize)) + as isize, + ); let pal_idx_guard; let pal_idx = if t.frame_thread.pass != 0 { let p = t.frame_thread.pass & 1; @@ -2606,11 +2607,12 @@ pub(crate) unsafe fn rav1d_recon_b_intra( y = init_y; t.b.y += init_y; while y < sub_h4 { - let mut dst: *mut BD::Pixel = (f.cur.data.data[0] as *mut BD::Pixel).offset( - (4 * (t.b.y as isize * BD::pxstride(f.cur.stride[0]) - + t.b.x as isize - + init_x as isize)) as isize, - ); + let mut dst: *mut BD::Pixel = + (f.cur.data.as_ref().unwrap().data[0] as *mut BD::Pixel).offset( + (4 * 
(t.b.y as isize * BD::pxstride(f.cur.stride[0]) + + t.b.x as isize + + init_x as isize)) as isize, + ); x = init_x; t.b.x += init_x; while x < sub_w4 { @@ -2645,7 +2647,8 @@ pub(crate) unsafe fn rav1d_recon_b_intra( let data_height = 4 * ts.tiling.row_end; let data_diff = (data_height - 1) as isize * data_stride; let dst_slice = slice::from_raw_parts( - (f.cur.data.data[0] as *const BD::Pixel).offset(cmp::min(data_diff, 0)), + (f.cur.data.as_ref().unwrap().data[0] as *const BD::Pixel) + .offset(cmp::min(data_diff, 0)), data_diff.unsigned_abs() + data_width as usize, ); m = rav1d_prepare_intra_edges( @@ -2825,7 +2828,8 @@ pub(crate) unsafe fn rav1d_recon_b_intra( if b.c2rust_unnamed.c2rust_unnamed.uv_mode as c_int == CFL_PRED as c_int { assert!(init_x == 0 && init_y == 0); let ac = &mut t.scratch.c2rust_unnamed_0.ac_txtp_map.ac; - let y_src: *mut BD::Pixel = (f.cur.data.data[0] as *mut BD::Pixel) + let y_src: *mut BD::Pixel = (f.cur.data.as_ref().unwrap().data[0] + as *mut BD::Pixel) .offset((4 * (t.b.x & !ss_hor)) as isize) .offset( ((4 * (t.b.y & !ss_ver)) as isize @@ -2836,8 +2840,10 @@ pub(crate) unsafe fn rav1d_recon_b_intra( * ((t.b.x >> ss_hor) as isize + (t.b.y >> ss_ver) as isize * BD::pxstride(stride)); let uv_dst: [*mut BD::Pixel; 2] = [ - (f.cur.data.data[1] as *mut BD::Pixel).offset(uv_off as isize), - (f.cur.data.data[2] as *mut BD::Pixel).offset(uv_off as isize), + (f.cur.data.as_ref().unwrap().data[1] as *mut BD::Pixel) + .offset(uv_off as isize), + (f.cur.data.as_ref().unwrap().data[2] as *mut BD::Pixel) + .offset(uv_off as isize), ]; let furthest_r = (cw4 << ss_hor) + (*t_dim).w as c_int - 1 & !((*t_dim).w as c_int - 1); @@ -2879,7 +2885,8 @@ pub(crate) unsafe fn rav1d_recon_b_intra( let data_height = 4 * ts.tiling.row_end >> ss_ver; let data_diff = (data_height - 1) as isize * data_stride; let uvdst_slice = slice::from_raw_parts( - (f.cur.data.data[1 + pl as usize] as *const BD::Pixel) + (f.cur.data.as_ref().unwrap().data[1 + pl as usize] + as *const BD::Pixel) .offset(cmp::min(data_diff, 0)), data_diff.unsigned_abs() + data_width as usize, ); @@ -2963,7 +2970,8 @@ pub(crate) unsafe fn rav1d_recon_b_intra( ) }; f.dsp.ipred.pal_pred.call::( - (f.cur.data.data[1] as *mut BD::Pixel).offset(uv_dstoff as isize), + (f.cur.data.as_ref().unwrap().data[1] as *mut BD::Pixel) + .offset(uv_dstoff as isize), f.cur.stride[1], pal[1].as_ptr(), pal_idx.as_ptr(), @@ -2971,7 +2979,8 @@ pub(crate) unsafe fn rav1d_recon_b_intra( cbh4 * 4, ); f.dsp.ipred.pal_pred.call::( - (f.cur.data.data[2] as *mut BD::Pixel).offset(uv_dstoff as isize), + (f.cur.data.as_ref().unwrap().data[2] as *mut BD::Pixel) + .offset(uv_dstoff as isize), f.cur.stride[1], pal[2].as_ptr(), pal_idx.as_ptr(), @@ -2980,14 +2989,16 @@ pub(crate) unsafe fn rav1d_recon_b_intra( ); if debug_block_info!(f, t.b) && 0 != 0 { hex_dump::( - (f.cur.data.data[1] as *mut BD::Pixel).offset(uv_dstoff as isize), + (f.cur.data.as_ref().unwrap().data[1] as *mut BD::Pixel) + .offset(uv_dstoff as isize), BD::pxstride(f.cur.stride[1] as usize), cbw4 as usize * 4, cbh4 as usize * 4, "u-pal-pred", ); hex_dump::( - (f.cur.data.data[2] as *mut BD::Pixel).offset(uv_dstoff as isize), + (f.cur.data.as_ref().unwrap().data[2] as *mut BD::Pixel) + .offset(uv_dstoff as isize), BD::pxstride(f.cur.stride[1] as usize), cbw4 as usize * 4, cbh4 as usize * 4, @@ -3017,8 +3028,10 @@ pub(crate) unsafe fn rav1d_recon_b_intra( y = init_y >> ss_ver; t.b.y += init_y; while y < sub_ch4 { - let mut dst: *mut BD::Pixel = - (f.cur.data.data[(1 + pl) as usize] as *mut 
BD::Pixel).offset( + let mut dst: *mut BD::Pixel = (f.cur.data.as_ref().unwrap().data + [(1 + pl) as usize] + as *mut BD::Pixel) + .offset( (4 * ((t.b.y >> ss_ver) as isize * BD::pxstride(stride) + (t.b.x + init_x >> ss_hor) as isize)) as isize, @@ -3088,7 +3101,8 @@ pub(crate) unsafe fn rav1d_recon_b_intra( let data_height = 4 * ts.tiling.row_end >> ss_ver; let data_diff = (data_height - 1) as isize * data_stride; let dstuv_slice = slice::from_raw_parts( - (f.cur.data.data[1 + pl as usize] as *const BD::Pixel) + (f.cur.data.as_ref().unwrap().data[1 + pl as usize] + as *const BD::Pixel) .offset(cmp::min(data_diff, 0)), data_diff.unsigned_abs() + data_width as usize, ); @@ -3321,7 +3335,7 @@ pub(crate) unsafe fn rav1d_recon_b_inter( .unwrap_or(Rav1dPixelLayoutSubSampled::I444); let cbh4 = bh4 + ss_ver >> ss_ver; let cbw4 = bw4 + ss_hor >> ss_hor; - let mut dst = (f.cur.data.data[0] as *mut BD::Pixel) + let mut dst = (f.cur.data.as_ref().unwrap().data[0] as *mut BD::Pixel) .offset((4 * (t.b.y as isize * BD::pxstride(f.cur.stride[0]) + t.b.x as isize)) as isize); let uvdstoff = 4 * ((t.b.x >> ss_hor) as isize + (t.b.y >> ss_ver) as isize * BD::pxstride(f.cur.stride[1])); @@ -3351,7 +3365,8 @@ pub(crate) unsafe fn rav1d_recon_b_inter( f, &mut t.scratch.c2rust_unnamed.emu_edge, t.b, - (f.cur.data.data[pl as usize] as *mut BD::Pixel).offset(uvdstoff as isize), + (f.cur.data.as_ref().unwrap().data[pl as usize] as *mut BD::Pixel) + .offset(uvdstoff as isize), 0 as *mut i16, f.cur.stride[1], bw4 << (bw4 == ss_hor) as c_int, @@ -3518,7 +3533,8 @@ pub(crate) unsafe fn rav1d_recon_b_inter( } } - let uvdst = (f.cur.data.data[(1 + pl) as usize] as *mut BD::Pixel) + let uvdst = (f.cur.data.as_ref().unwrap().data[(1 + pl) as usize] + as *mut BD::Pixel) .offset(uvdstoff as isize); match comp_inter_type { CompInterType::Avg => { @@ -3629,7 +3645,8 @@ pub(crate) unsafe fn rav1d_recon_b_inter( let data_height = 4 * ts.tiling.row_end; let data_diff = (data_height - 1) as isize * data_stride; let dst_slice = slice::from_raw_parts( - (f.cur.data.data[0] as *const BD::Pixel).offset(cmp::min(data_diff, 0)), + (f.cur.data.as_ref().unwrap().data[0] as *const BD::Pixel) + .offset(cmp::min(data_diff, 0)), data_diff.unsigned_abs() + data_width as usize, ); m = rav1d_prepare_intra_edges( @@ -3711,7 +3728,8 @@ pub(crate) unsafe fn rav1d_recon_b_inter( f, &mut t.scratch.c2rust_unnamed.emu_edge, t.b, - (f.cur.data.data[(1 + pl) as usize] as *mut BD::Pixel) + (f.cur.data.as_ref().unwrap().data[(1 + pl) as usize] + as *mut BD::Pixel) .offset(uvdstoff as isize), 0 as *mut i16, f.cur.stride[1], @@ -3750,7 +3768,8 @@ pub(crate) unsafe fn rav1d_recon_b_inter( f, &mut t.scratch.c2rust_unnamed.emu_edge, t.b, - (f.cur.data.data[(1 + pl) as usize] as *mut BD::Pixel) + (f.cur.data.as_ref().unwrap().data[(1 + pl) as usize] + as *mut BD::Pixel) .offset(uvdstoff as isize) .offset(v_off as isize), 0 as *mut i16, @@ -3788,7 +3807,8 @@ pub(crate) unsafe fn rav1d_recon_b_inter( f, &mut t.scratch.c2rust_unnamed.emu_edge, t.b, - (f.cur.data.data[(1 + pl) as usize] as *mut BD::Pixel) + (f.cur.data.as_ref().unwrap().data[(1 + pl) as usize] + as *mut BD::Pixel) .offset(uvdstoff as isize) .offset(h_off as isize), 0 as *mut i16, @@ -3821,7 +3841,7 @@ pub(crate) unsafe fn rav1d_recon_b_inter( f, &mut t.scratch.c2rust_unnamed.emu_edge, t.b, - (f.cur.data.data[(1 + pl) as usize] as *mut BD::Pixel) + (f.cur.data.as_ref().unwrap().data[(1 + pl) as usize] as *mut BD::Pixel) .offset(uvdstoff as isize) .offset(h_off as isize) .offset(v_off as isize), @@ 
-3850,7 +3870,8 @@ pub(crate) unsafe fn rav1d_recon_b_inter( f, &mut t.scratch.c2rust_unnamed.emu_edge, t.b, - (f.cur.data.data[(1 + pl) as usize] as *mut BD::Pixel) + (f.cur.data.as_ref().unwrap().data[(1 + pl) as usize] + as *mut BD::Pixel) .offset(uvdstoff as isize), 0 as *mut i16, f.cur.stride[1], @@ -3870,7 +3891,8 @@ pub(crate) unsafe fn rav1d_recon_b_inter( f, &mut t.scratch.c2rust_unnamed.emu_edge, t.b, - (f.cur.data.data[(1 + pl) as usize] as *mut BD::Pixel) + (f.cur.data.as_ref().unwrap().data[(1 + pl) as usize] + as *mut BD::Pixel) .offset(uvdstoff as isize), 0 as *mut i16, f.cur.stride[1], @@ -3888,7 +3910,8 @@ pub(crate) unsafe fn rav1d_recon_b_inter( obmc::( f, t, - (f.cur.data.data[(1 + pl) as usize] as *mut BD::Pixel) + (f.cur.data.as_ref().unwrap().data[(1 + pl) as usize] + as *mut BD::Pixel) .offset(uvdstoff as isize), f.cur.stride[1], b_dim, @@ -3923,7 +3946,8 @@ pub(crate) unsafe fn rav1d_recon_b_inter( b.interintra_mode() as IntraPredMode }; let mut angle = 0; - let uvdst = (f.cur.data.data[(1 + pl) as usize] as *mut BD::Pixel) + let uvdst = (f.cur.data.as_ref().unwrap().data[(1 + pl) as usize] + as *mut BD::Pixel) .offset(uvdstoff as isize); let top_sb_edge_slice = if t.b.y & f.sb_step - 1 == 0 { let mut top_sb_edge = @@ -3939,7 +3963,8 @@ pub(crate) unsafe fn rav1d_recon_b_inter( let data_height = 4 * ts.tiling.row_end >> ss_ver; let data_diff = (data_height - 1) as isize * data_stride; let dstuv_slice = slice::from_raw_parts( - (f.cur.data.data[1 + pl as usize] as *const BD::Pixel) + (f.cur.data.as_ref().unwrap().data[1 + pl as usize] + as *const BD::Pixel) .offset(cmp::min(data_diff, 0)), data_diff.unsigned_abs() + data_width as usize, ); @@ -4001,14 +4026,16 @@ pub(crate) unsafe fn rav1d_recon_b_inter( ); if has_chroma { hex_dump::( - &mut *(f.cur.data.data[1] as *mut BD::Pixel).offset(uvdstoff as isize), + &mut *(f.cur.data.as_ref().unwrap().data[1] as *mut BD::Pixel) + .offset(uvdstoff as isize), f.cur.stride[1] as usize, cbw4 as usize * 4, cbh4 as usize * 4, "u-pred", ); hex_dump::( - &mut *(f.cur.data.data[2] as *mut BD::Pixel).offset(uvdstoff as isize), + &mut *(f.cur.data.as_ref().unwrap().data[2] as *mut BD::Pixel) + .offset(uvdstoff as isize), f.cur.stride[1] as usize, cbw4 as usize * 4, cbh4 as usize * 4, @@ -4083,7 +4110,8 @@ pub(crate) unsafe fn rav1d_recon_b_inter( t.b.y -= y; if has_chroma { for pl in 0..2 { - let mut uvdst = (f.cur.data.data[(1 + pl) as usize] as *mut BD::Pixel) + let mut uvdst = (f.cur.data.as_ref().unwrap().data[(1 + pl) as usize] + as *mut BD::Pixel) .offset(uvdstoff as isize) .offset(BD::pxstride(f.cur.stride[1]) * init_y as isize * 4 >> ss_ver); y = init_y >> ss_ver; @@ -4230,19 +4258,19 @@ pub(crate) unsafe fn rav1d_filter_sbrow_deblock_cols( let p: [&mut [BD::Pixel]; 3] = [ slice::from_raw_parts_mut( - f.cur.data.data[f.lf.p[0]] + f.cur.data.as_ref().unwrap().data[f.lf.p[0]] .cast::() .offset(cmp::min(y_span, 0)), y_span.unsigned_abs() + y_width as usize + RAV1D_PICTURE_ALIGNMENT, ), slice::from_raw_parts_mut( - f.cur.data.data[f.lf.p[1]] + f.cur.data.as_ref().unwrap().data[f.lf.p[1]] .cast::() .offset(cmp::min(uv_span, 0)), uv_span.unsigned_abs() + uv_width as usize + RAV1D_PICTURE_ALIGNMENT, ), slice::from_raw_parts_mut( - f.cur.data.data[f.lf.p[2]] + f.cur.data.as_ref().unwrap().data[f.lf.p[2]] .cast::() .offset(cmp::min(uv_span, 0)), uv_span.unsigned_abs() + uv_width as usize + RAV1D_PICTURE_ALIGNMENT, @@ -4288,19 +4316,19 @@ pub(crate) unsafe fn rav1d_filter_sbrow_deblock_rows( let p: [&mut [BD::Pixel]; 3] = [ 
slice::from_raw_parts_mut( - f.cur.data.data[f.lf.p[0]] + f.cur.data.as_ref().unwrap().data[f.lf.p[0]] .cast::() .offset(cmp::min(y_span, 0)), y_span.unsigned_abs() + y_width as usize + RAV1D_PICTURE_ALIGNMENT, ), slice::from_raw_parts_mut( - f.cur.data.data[f.lf.p[1]] + f.cur.data.as_ref().unwrap().data[f.lf.p[1]] .cast::() .offset(cmp::min(uv_span, 0)), uv_span.unsigned_abs() + uv_width as usize + RAV1D_PICTURE_ALIGNMENT, ), slice::from_raw_parts_mut( - f.cur.data.data[f.lf.p[2]] + f.cur.data.as_ref().unwrap().data[f.lf.p[2]] .cast::() .offset(cmp::min(uv_span, 0)), uv_span.unsigned_abs() + uv_width as usize + RAV1D_PICTURE_ALIGNMENT, @@ -4340,13 +4368,13 @@ pub(crate) unsafe fn rav1d_filter_sbrow_cdef( let y = sby * sbsz * 4; let ss_ver = (f.cur.p.layout as c_uint == Rav1dPixelLayout::I420 as c_int as c_uint) as c_int; let p: [*mut BD::Pixel; 3] = [ - f.cur.data.data[f.lf.p[0]] + f.cur.data.as_ref().unwrap().data[f.lf.p[0]] .cast::() .offset((y as isize * BD::pxstride(f.cur.stride[0])) as isize), - f.cur.data.data[f.lf.p[1]] + f.cur.data.as_ref().unwrap().data[f.lf.p[1]] .cast::() .offset((y as isize * BD::pxstride(f.cur.stride[1]) >> ss_ver) as isize), - f.cur.data.data[f.lf.p[2]] + f.cur.data.as_ref().unwrap().data[f.lf.p[2]] .cast::() .offset((y as isize * BD::pxstride(f.cur.stride[1]) >> ss_ver) as isize), ]; @@ -4380,24 +4408,24 @@ pub(crate) unsafe fn rav1d_filter_sbrow_resize( let y = sby * sbsz * 4; let ss_ver = (f.cur.p.layout as c_uint == Rav1dPixelLayout::I420 as c_int as c_uint) as c_int; let p: [*const BD::Pixel; 3] = [ - f.cur.data.data[f.lf.p[0]] + f.cur.data.as_ref().unwrap().data[f.lf.p[0]] .cast::() .offset(y as isize * BD::pxstride(f.cur.stride[0])), - f.cur.data.data[f.lf.p[1]] + f.cur.data.as_ref().unwrap().data[f.lf.p[1]] .cast::() .offset(y as isize * BD::pxstride(f.cur.stride[1]) >> ss_ver), - f.cur.data.data[f.lf.p[2]] + f.cur.data.as_ref().unwrap().data[f.lf.p[2]] .cast::() .offset(y as isize * BD::pxstride(f.cur.stride[1]) >> ss_ver), ]; let sr_p: [*mut BD::Pixel; 3] = [ - f.sr_cur.p.data.data[f.lf.sr_p[0]] + f.sr_cur.p.data.as_ref().unwrap().data[f.lf.sr_p[0]] .cast::() .offset((y as isize * BD::pxstride(f.sr_cur.p.stride[0])) as isize), - f.sr_cur.p.data.data[f.lf.sr_p[1]] + f.sr_cur.p.data.as_ref().unwrap().data[f.lf.sr_p[1]] .cast::() .offset((y as isize * BD::pxstride(f.sr_cur.p.stride[1]) >> ss_ver) as isize), - f.sr_cur.p.data.data[f.lf.sr_p[2]] + f.sr_cur.p.data.as_ref().unwrap().data[f.lf.sr_p[2]] .cast::() .offset((y as isize * BD::pxstride(f.sr_cur.p.stride[1]) >> ss_ver) as isize), ]; @@ -4455,15 +4483,15 @@ pub(crate) unsafe fn rav1d_filter_sbrow_lr( let h = (*f).sr_cur.p.p.h + 127 & !127; let mut sr_p: [&mut [BD::Pixel]; 3] = [ slice::from_raw_parts_mut( - f.sr_cur.p.data.data[f.lf.sr_p[0]].cast::(), + f.sr_cur.p.data.as_ref().unwrap().data[f.lf.sr_p[0]].cast::(), (h as isize * BD::pxstride(f.sr_cur.p.stride[0])) as usize, ), slice::from_raw_parts_mut( - f.sr_cur.p.data.data[f.lf.sr_p[1]].cast::(), + f.sr_cur.p.data.as_ref().unwrap().data[f.lf.sr_p[1]].cast::(), (h as isize * BD::pxstride(f.sr_cur.p.stride[1])) as usize >> ss_ver, ), slice::from_raw_parts_mut( - f.sr_cur.p.data.data[f.lf.sr_p[2]].cast::(), + f.sr_cur.p.data.as_ref().unwrap().data[f.lf.sr_p[2]].cast::(), (h as isize * BD::pxstride(f.sr_cur.p.stride[1])) as usize >> ss_ver, ), ]; @@ -4503,7 +4531,7 @@ pub(crate) unsafe fn rav1d_backup_ipred_edge( let sby = t.b.y >> f.sb_shift; let sby_off = f.sb128w * 128 * sby; let x_off = ts.tiling.col_start; - let y: *const BD::Pixel = 
(f.cur.data.data[0] as *const BD::Pixel) + let y: *const BD::Pixel = (f.cur.data.as_ref().unwrap().data[0] as *const BD::Pixel) .offset((x_off * 4) as isize) .offset((((t.b.y + f.sb_step) * 4 - 1) as isize * BD::pxstride(f.cur.stride[0])) as isize); BD::pixel_copy( @@ -4533,7 +4561,7 @@ pub(crate) unsafe fn rav1d_backup_ipred_edge( .unwrap(), )[(sby_off + (x_off * 4 >> ss_hor)).try_into().unwrap()..], &slice::from_raw_parts( - f.cur.data.data[pl as usize].cast(), + f.cur.data.as_ref().unwrap().data[pl as usize].cast(), (uv_off + (4 * (ts.tiling.col_end - x_off) >> ss_hor) as isize) .try_into() .unwrap(), diff --git a/src/ref.rs b/src/ref.rs index df0bb9816..c4030b9c3 100644 --- a/src/ref.rs +++ b/src/ref.rs @@ -1,9 +1,6 @@ -use libc::free; -use libc::malloc; use std::ffi::c_int; use std::ffi::c_void; use std::sync::atomic::AtomicI32; -use std::sync::atomic::Ordering; #[repr(C)] pub struct Rav1dRef { @@ -14,47 +11,3 @@ pub struct Rav1dRef { pub(crate) free_callback: Option ()>, pub(crate) user_data: *mut c_void, } - -#[inline] -pub unsafe fn rav1d_ref_inc(r#ref: *mut Rav1dRef) { - (*r#ref).ref_cnt.fetch_add(1, Ordering::Relaxed); -} - -pub unsafe fn rav1d_ref_wrap( - ptr: *const u8, - free_callback: Option ()>, - user_data: *mut c_void, -) -> *mut Rav1dRef { - let res: *mut Rav1dRef = malloc(::core::mem::size_of::()) as *mut Rav1dRef; - if res.is_null() { - return 0 as *mut Rav1dRef; - } - (*res).data = 0 as *mut c_void; - (*res).const_data = ptr as *const c_void; - (*res).ref_cnt = AtomicI32::new(1); - (*res).free_ref = 1 as c_int; - (*res).free_callback = free_callback; - (*res).user_data = user_data; - return res; -} - -pub unsafe fn rav1d_ref_dec(pref: *mut *mut Rav1dRef) { - if pref.is_null() { - unreachable!(); - } - let r#ref: *mut Rav1dRef = *pref; - if r#ref.is_null() { - return; - } - *pref = 0 as *mut Rav1dRef; - if (*r#ref).ref_cnt.fetch_sub(1, Ordering::SeqCst) == 1 { - let free_ref = (*r#ref).free_ref; - ((*r#ref).free_callback).expect("non-null function pointer")( - (*r#ref).const_data as *const u8, - (*r#ref).user_data, - ); - if free_ref != 0 { - free(r#ref as *mut c_void); - } - } -} diff --git a/src/thread_task.rs b/src/thread_task.rs index fd3720daf..e69be3110 100644 --- a/src/thread_task.rs +++ b/src/thread_task.rs @@ -1257,7 +1257,7 @@ pub unsafe fn rav1d_worker_task(c: &Rav1dContext, task_thread: Arc 1`. let progress = &**f.sr_cur.progress.as_ref().unwrap(); - if !(f.sr_cur.p.data.data[0]).is_null() { + if f.sr_cur.p.data.is_some() { progress[0].store(if error_0 != 0 { FRAME_ERROR } else { y }, Ordering::SeqCst); } f.frame_thread_progress.entropy.store( @@ -1310,7 +1310,7 @@ pub unsafe fn rav1d_worker_task(c: &Rav1dContext, task_thread: Arc 1`. if let Some(progress) = &f.sr_cur.progress { // upon flush, this can be free'ed already - if !(f.sr_cur.p.data.data[0]).is_null() { + if f.sr_cur.p.data.is_some() { progress[1].store( if error_0 != 0 { FRAME_ERROR } else { y_0 }, Ordering::SeqCst,
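Reviewer note, appended after the patch: the diff above replaces the hand-rolled `Rav1dRef` counting (`rav1d_ref_inc`/`rav1d_ref_dec`/`rav1d_ref_wrap`, the `free_buffer` trampoline, and the `validate_input!` null checks) with an `Arc<Rav1dPictureData>` whose `Drop` impl hands the planes back to the allocator, carried across the C API as the opaque `r#ref` field via `RawArc::from_arc`/`into_arc`. The sketch below is a minimal, self-contained illustration of that ownership model; the names (`PictureData`, `PicAllocator`, `arc_to_raw`, `raw_to_arc`) are stand-ins, not the actual rav1d items, and plain `Arc::into_raw`/`Arc::from_raw` stand in for `RawArc`.

```rust
// Minimal sketch (not the rav1d types) of the ownership model this patch moves to:
// plane pointers live in an `Arc`, the allocator is stored next to the pointers it
// produced, and `Drop` returns them, replacing the manual `Rav1dRef` counting.

use std::ffi::c_void;
use std::ptr::NonNull;
use std::sync::Arc;

// Stand-in for `Rav1dPicAllocator`: a cloneable handle with a dealloc callback.
#[derive(Clone)]
struct PicAllocator {
    dealloc: fn(data: [*mut c_void; 3], cookie: Option<NonNull<c_void>>),
}

// Stand-in for `Rav1dPictureData`: raw plane pointers plus whatever the allocator
// needs to free them, and the allocator itself so `Drop` can call it.
struct PictureData {
    data: [*mut c_void; 3],
    allocator_data: Option<NonNull<c_void>>,
    allocator: PicAllocator,
}

impl Drop for PictureData {
    fn drop(&mut self) {
        // Runs exactly once, when the last `Arc<PictureData>` clone is dropped.
        (self.allocator.dealloc)(self.data, self.allocator_data);
    }
}

// Stand-ins for `RawArc::from_arc` / `RawArc::into_arc`: the refcount crosses the
// C API as an opaque pointer in the `r#ref` field.
fn arc_to_raw(arc: Arc<PictureData>) -> *const PictureData {
    Arc::into_raw(arc) // keeps the strong count; the raw pointer now owns one reference
}

/// # Safety
/// `raw` must have come from `arc_to_raw` and is consumed by this call.
unsafe fn raw_to_arc(raw: *const PictureData) -> Arc<PictureData> {
    Arc::from_raw(raw)
}

fn main() {
    let pic = Arc::new(PictureData {
        data: [std::ptr::null_mut(); 3],
        allocator_data: None,
        allocator: PicAllocator {
            dealloc: |_, _| println!("dealloc_picture_data called once"),
        },
    });
    // `rav1d_picture_ref` is now just a clone; `is_some()` replaces the old
    // `data[0].is_null()` checks.
    let c_side = arc_to_raw(pic.clone());
    drop(pic);                            // picture still alive: the C side holds a ref
    drop(unsafe { raw_to_arc(c_side) });  // last ref gone: Drop frees the planes
}
```

Storing the allocator inside the data it allocated is what lets `rav1d_picture_ref`, `rav1d_picture_move_ref`, and `rav1d_picture_unref_internal` collapse to `clone`/`mem::take`, and making the field `Option<Arc<_>>` is why every `data[0].is_null()` test in decode.rs, lib.rs, obu.rs, and thread_task.rs becomes `data.is_some()`/`is_none()`.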
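A second, smaller sketch on the `mem::forget(p)` added in `dav1d_default_picture_alloc`: the temporary `Rav1dPicture` there is built from a `Dav1dPicture` whose `*_ref` fields each own one strong reference, so dropping the temporary would decrement counts the caller still holds; as the added comment in the diff says, the allocator now writes `stride`, `data`, and `allocator_data` straight into `*p_c` and only forgets the convenience copy. The same borrow-then-forget pattern, reduced to a single hypothetical `Arc<u32>` (names here are illustrative, not rav1d's):

```rust
use std::mem;
use std::sync::Arc;

// Hypothetical C-visible struct: the opaque field owns one strong reference.
struct CSide {
    hdr_ref: *const u32,
}

// Leans on `CSide` always holding a live reference, as `Dav1dPicture` does.
fn peek(c: &CSide) -> u32 {
    // Rebuild the Arc just to read through it conveniently...
    let tmp: Arc<u32> = unsafe { Arc::from_raw(c.hdr_ref) };
    let v = *tmp;
    // ...but the C side still owns that reference, so don't decrement it here.
    mem::forget(tmp);
    v
}

fn main() {
    let c = CSide {
        hdr_ref: Arc::into_raw(Arc::new(7u32)),
    };
    assert_eq!(peek(&c), 7);
    assert_eq!(peek(&c), 7); // still valid: the strong count never dropped
    // Finally release the reference the C side was holding.
    drop(unsafe { Arc::from_raw(c.hdr_ref) });
}
```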