use super::{
    PositionIterInternal, PyBytes, PyBytesRef, PyInt, PyListRef, PySlice, PyStr, PyStrRef, PyTuple,
    PyTupleRef, PyType, PyTypeRef,
};
use crate::{
    atomic_func,
    buffer::FormatSpec,
    bytesinner::bytes_to_hex,
    class::PyClassImpl,
    common::{
        borrow::{BorrowedValue, BorrowedValueMut},
        hash::PyHash,
        lock::OnceCell,
    },
    convert::ToPyObject,
    function::{Either, FuncArgs, OptionalArg, PyComparisonValue},
    protocol::{
        BufferDescriptor, BufferMethods, PyBuffer, PyIterReturn, PyMappingMethods,
        PySequenceMethods, VecBuffer,
    },
    sliceable::SequenceIndexOp,
    types::{
        AsBuffer, AsMapping, AsSequence, Comparable, Constructor, Hashable, IterNext, Iterable,
        PyComparisonOp, Representable, SelfIter, Unconstructible,
    },
    AsObject, Context, Py, PyObject, PyObjectRef, PyPayload, PyRef, PyResult,
    TryFromBorrowedObject, TryFromObject, VirtualMachine,
};
use crossbeam_utils::atomic::AtomicCell;
use itertools::Itertools;
use once_cell::sync::Lazy;
use rustpython_common::lock::PyMutex;
use std::{cmp::Ordering, fmt::Debug, mem::ManuallyDrop, ops::Range};

#[derive(FromArgs)]
pub struct PyMemoryViewNewArgs {
    object: PyObjectRef,
}

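/// A `memoryview`: a zero-copy view over memory exported by another object
/// via the buffer protocol, mirroring CPython's `memoryview`. For
/// illustration, the CPython-level behavior this implements:
///
/// ```text
/// >>> m = memoryview(b"abc")
/// >>> m[0], bytes(m[1:])
/// (97, b'bc')
/// ```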
#[pyclass(module = false, name = "memoryview")]
#[derive(Debug)]
pub struct PyMemoryView {
    // `ManuallyDrop` lets `Drop` decide whether the buffer still needs to be
    // released or was already released explicitly via `release()`
    buffer: ManuallyDrop<PyBuffer>,
    // a released view no longer permits access; the underlying buffer may
    // still be alive, since other views can share it
    released: AtomicCell<bool>,
    // byte offset into the buffer from which element positions are computed
    start: usize,
    format_spec: FormatSpec,
    // this view's own layout; it may differ from the exporter's descriptor
    // after slicing or casting
    desc: BufferDescriptor,
    hash: OnceCell<PyHash>,
}

impl Constructor for PyMemoryView {
    type Args = PyMemoryViewNewArgs;

    fn py_new(cls: PyTypeRef, args: Self::Args, vm: &VirtualMachine) -> PyResult {
        let zelf = Self::from_object(&args.object, vm)?;
        zelf.into_ref_with_type(vm, cls).map(Into::into)
    }
}

impl PyMemoryView {
    fn parse_format(format: &str, vm: &VirtualMachine) -> PyResult<FormatSpec> {
        FormatSpec::parse(format.as_bytes(), vm)
    }

    pub fn from_object(obj: &PyObject, vm: &VirtualMachine) -> PyResult<Self> {
        if let Some(other) = obj.payload::<Self>() {
            // viewing another memoryview shares its buffer instead of
            // stacking a new buffer export on top of it
            Ok(other.new_view())
        } else {
            let buffer = PyBuffer::try_from_borrowed_object(vm, obj)?;
            PyMemoryView::from_buffer(buffer, vm)
        }
    }

    pub fn from_buffer(buffer: PyBuffer, vm: &VirtualMachine) -> PyResult<Self> {
        // an exported buffer keeps its layout fixed for the lifetime of the
        // export, so the descriptor can be parsed and cloned up front
        let format_spec = Self::parse_format(&buffer.desc.format, vm)?;
        let desc = buffer.desc.clone();

        Ok(PyMemoryView {
            buffer: ManuallyDrop::new(buffer),
            released: AtomicCell::new(false),
            start: 0,
            format_spec,
            desc,
            hash: OnceCell::new(),
        })
    }

    /// Like `from_buffer`, but restricts the view to `range` along the first
    /// dimension.
    pub fn from_buffer_range(
        buffer: PyBuffer,
        range: Range<usize>,
        vm: &VirtualMachine,
    ) -> PyResult<Self> {
        let mut zelf = Self::from_buffer(buffer, vm)?;

        zelf.init_range(range, 0);
        zelf.init_len();
        Ok(zelf)
    }

    pub fn new_view(&self) -> Self {
        let zelf = PyMemoryView {
            buffer: self.buffer.clone(),
            released: AtomicCell::new(false),
            start: self.start,
            format_spec: self.format_spec.clone(),
            desc: self.desc.clone(),
            hash: OnceCell::new(),
        };
        // the cloned buffer must be retained so the exporter stays pinned
        // for as long as the new view is alive
        zelf.buffer.retain();
        zelf
    }

    fn try_not_released(&self, vm: &VirtualMachine) -> PyResult<()> {
        if self.released.load() {
            Err(vm.new_value_error("operation forbidden on released memoryview object".to_owned()))
        } else {
            Ok(())
        }
    }

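    /// Fetch the element at index `i` of a one-dimensional view. The byte
    /// position is `start + wrapped(i) * stride + suboffset`, where a
    /// negative `i` wraps from the end as in Python.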
    fn getitem_by_idx(&self, i: isize, vm: &VirtualMachine) -> PyResult {
        if self.desc.ndim() != 1 {
            return Err(vm.new_not_implemented_error(
                "multi-dimensional sub-views are not implemented".to_owned(),
            ));
        }
        let (shape, stride, suboffset) = self.desc.dim_desc[0];
        let index = i
            .wrapped_at(shape)
            .ok_or_else(|| vm.new_index_error("index out of range".to_owned()))?;
        let index = index as isize * stride + suboffset;
        let pos = (index + self.start as isize) as usize;
        self.unpack_single(pos, vm)
    }

    fn getitem_by_slice(&self, slice: &PySlice, vm: &VirtualMachine) -> PyResult {
        let mut other = self.new_view();
        other.init_slice(slice, 0, vm)?;
        other.init_len();

        Ok(other.into_ref(&vm.ctx).into())
    }

    fn getitem_by_multi_idx(&self, indexes: &[isize], vm: &VirtualMachine) -> PyResult {
        let pos = self.pos_from_multi_index(indexes, vm)?;
        let bytes = self.buffer.obj_bytes();
        format_unpack(&self.format_spec, &bytes[pos..pos + self.desc.itemsize], vm)
    }

    fn setitem_by_idx(&self, i: isize, value: PyObjectRef, vm: &VirtualMachine) -> PyResult<()> {
        if self.desc.ndim() != 1 {
            return Err(vm.new_not_implemented_error("sub-views are not implemented".to_owned()));
        }
        let (shape, stride, suboffset) = self.desc.dim_desc[0];
        let index = i
            .wrapped_at(shape)
            .ok_or_else(|| vm.new_index_error("index out of range".to_owned()))?;
        let index = index as isize * stride + suboffset;
        let pos = (index + self.start as isize) as usize;
        self.pack_single(pos, value, vm)
    }

    fn setitem_by_multi_idx(
        &self,
        indexes: &[isize],
        value: PyObjectRef,
        vm: &VirtualMachine,
    ) -> PyResult<()> {
        let pos = self.pos_from_multi_index(indexes, vm)?;
        self.pack_single(pos, value, vm)
    }

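    /// Pack `value` into the `itemsize` bytes at byte offset `pos` using this
    /// view's struct-style format. For illustration, the CPython-level effect:
    ///
    /// ```text
    /// >>> v = memoryview(bytearray(2)).cast('h')
    /// >>> v[0] = -1          # writes b"\xff\xff" into the buffer
    /// ```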
    fn pack_single(&self, pos: usize, value: PyObjectRef, vm: &VirtualMachine) -> PyResult<()> {
        let mut bytes = self.buffer.obj_bytes_mut();
        let data = self.format_spec.pack(vec![value], vm).map_err(|_| {
            vm.new_type_error(format!(
                "memoryview: invalid type for format '{}'",
                &self.desc.format
            ))
        })?;
        bytes[pos..pos + self.desc.itemsize].copy_from_slice(&data);
        Ok(())
    }

    fn unpack_single(&self, pos: usize, vm: &VirtualMachine) -> PyResult {
        let bytes = self.buffer.obj_bytes();
        self.format_spec
            .unpack(&bytes[pos..pos + self.desc.itemsize], vm)
            .map(|x| {
                if x.len() == 1 {
                    x.fast_getitem(0)
                } else {
                    x.into()
                }
            })
    }

    fn pos_from_multi_index(&self, indexes: &[isize], vm: &VirtualMachine) -> PyResult<usize> {
        match indexes.len().cmp(&self.desc.ndim()) {
            Ordering::Less => {
                return Err(vm.new_not_implemented_error("sub-views are not implemented".to_owned()))
            }
            Ordering::Greater => {
                return Err(vm.new_type_error(format!(
                    "cannot index {}-dimension view with {}-element tuple",
                    self.desc.ndim(),
                    indexes.len()
                )))
            }
            Ordering::Equal => (),
        }

        let pos = self.desc.position(indexes, vm)?;
        let pos = (pos + self.start as isize) as usize;
        Ok(pos)
    }

    fn init_len(&mut self) {
        let product: usize = self.desc.dim_desc.iter().map(|x| x.0).product();
        self.desc.len = product * self.desc.itemsize;
    }

    fn init_range(&mut self, range: Range<usize>, dim: usize) {
        let (shape, stride, _) = self.desc.dim_desc[dim];
        debug_assert!(shape >= range.len());

        // if an inner dimension uses a suboffset (indirect addressing), the
        // offset belongs there; otherwise it folds into `start`
        let mut is_adjusted = false;
        for (_, _, suboffset) in self.desc.dim_desc.iter_mut().rev() {
            if *suboffset != 0 {
                *suboffset += stride * range.start as isize;
                is_adjusted = true;
                break;
            }
        }
        if !is_adjusted {
            self.start += stride as usize * range.start;
        }
        let newlen = range.len();
        self.desc.dim_desc[dim].0 = newlen;
    }

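    /// Narrow dimension `dim` according to `slice`. Slicing never copies; it
    /// only rewrites the descriptor. For example, `m[::2]` on a 1-D view
    /// halves the shape (rounding up) and doubles the stride while the
    /// underlying buffer is untouched.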
    fn init_slice(&mut self, slice: &PySlice, dim: usize, vm: &VirtualMachine) -> PyResult<()> {
        let (shape, stride, _) = self.desc.dim_desc[dim];
        let slice = slice.to_saturated(vm)?;
        let (range, step, slice_len) = slice.adjust_indices(shape);

        let mut is_adjusted_suboffset = false;
        for (_, _, suboffset) in self.desc.dim_desc.iter_mut().rev() {
            if *suboffset != 0 {
                *suboffset += stride * range.start as isize;
                is_adjusted_suboffset = true;
                break;
            }
        }
        if !is_adjusted_suboffset {
            // a negative step walks backwards, so the view starts at the last
            // element included in the range
            self.start += stride as usize
                * if step.is_negative() {
                    range.end - 1
                } else {
                    range.start
                };
        }
        self.desc.dim_desc[dim].0 = slice_len;
        self.desc.dim_desc[dim].1 *= step;

        Ok(())
    }

    fn _to_list(
        &self,
        bytes: &[u8],
        mut index: isize,
        dim: usize,
        vm: &VirtualMachine,
    ) -> PyResult<PyListRef> {
        let (shape, stride, suboffset) = self.desc.dim_desc[dim];
        if dim + 1 == self.desc.ndim() {
            let mut v = Vec::with_capacity(shape);
            for _ in 0..shape {
                let pos = index + suboffset;
                let pos = (pos + self.start as isize) as usize;
                let obj =
                    format_unpack(&self.format_spec, &bytes[pos..pos + self.desc.itemsize], vm)?;
                v.push(obj);
                index += stride;
            }
            return Ok(vm.ctx.new_list(v));
        }

        let mut v = Vec::with_capacity(shape);
        for _ in 0..shape {
            let obj = self._to_list(bytes, index + suboffset, dim + 1, vm)?.into();
            v.push(obj);
            index += stride;
        }
        Ok(vm.ctx.new_list(v))
    }

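    /// Element-wise equality following CPython semantics: a view always
    /// equals itself; otherwise a released view compares unequal, and any
    /// buffer-protocol object with an equivalent shape is compared after
    /// unpacking each side with its own format, e.g.:
    ///
    /// ```text
    /// >>> memoryview(b"ab") == b"ab"
    /// True
    /// ```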
    fn eq(zelf: &Py<Self>, other: &PyObject, vm: &VirtualMachine) -> PyResult<bool> {
        if zelf.is(other) {
            return Ok(true);
        }
        if zelf.released.load() {
            return Ok(false);
        }

        if let Some(other) = other.payload::<Self>() {
            if other.released.load() {
                return Ok(false);
            }
        }

        let other = match PyBuffer::try_from_borrowed_object(vm, other) {
            Ok(buf) => buf,
            Err(_) => return Ok(false),
        };

        if !is_equiv_shape(&zelf.desc, &other.desc) {
            return Ok(false);
        }

        let a_itemsize = zelf.desc.itemsize;
        let b_itemsize = other.desc.itemsize;
        let a_format_spec = &zelf.format_spec;
        let b_format_spec = &Self::parse_format(&other.desc.format, vm)?;

        if zelf.desc.ndim() == 0 {
            let a_val = format_unpack(a_format_spec, &zelf.buffer.obj_bytes()[..a_itemsize], vm)?;
            let b_val = format_unpack(b_format_spec, &other.obj_bytes()[..b_itemsize], vm)?;
            return vm.bool_eq(&a_val, &b_val);
        }

        let mut ret = Ok(true);
        let a_bytes = zelf.buffer.obj_bytes();
        let b_bytes = other.obj_bytes();
        zelf.desc.zip_eq(&other.desc, false, |a_range, b_range| {
            let a_range = (a_range.start + zelf.start as isize) as usize
                ..(a_range.end + zelf.start as isize) as usize;
            let b_range = b_range.start as usize..b_range.end as usize;
            let a_val = match format_unpack(a_format_spec, &a_bytes[a_range], vm) {
                Ok(val) => val,
                Err(e) => {
                    ret = Err(e);
                    return true;
                }
            };
            let b_val = match format_unpack(b_format_spec, &b_bytes[b_range], vm) {
                Ok(val) => val,
                Err(e) => {
                    ret = Err(e);
                    return true;
                }
            };
            ret = vm.bool_eq(&a_val, &b_val);
            if let Ok(b) = ret {
                !b
            } else {
                true
            }
        });
        ret
    }

    fn obj_bytes(&self) -> BorrowedValue<[u8]> {
        if self.desc.is_contiguous() {
            BorrowedValue::map(self.buffer.obj_bytes(), |x| {
                &x[self.start..self.start + self.desc.len]
            })
        } else {
            BorrowedValue::map(self.buffer.obj_bytes(), |x| &x[self.start..])
        }
    }

    fn obj_bytes_mut(&self) -> BorrowedValueMut<[u8]> {
        if self.desc.is_contiguous() {
            BorrowedValueMut::map(self.buffer.obj_bytes_mut(), |x| {
                &mut x[self.start..self.start + self.desc.len]
            })
        } else {
            BorrowedValueMut::map(self.buffer.obj_bytes_mut(), |x| &mut x[self.start..])
        }
    }

    fn as_contiguous(&self) -> Option<BorrowedValue<[u8]>> {
        self.desc.is_contiguous().then(|| {
            BorrowedValue::map(self.buffer.obj_bytes(), |x| {
                &x[self.start..self.start + self.desc.len]
            })
        })
    }

    fn _as_contiguous_mut(&self) -> Option<BorrowedValueMut<[u8]>> {
        self.desc.is_contiguous().then(|| {
            BorrowedValueMut::map(self.buffer.obj_bytes_mut(), |x| {
                &mut x[self.start..self.start + self.desc.len]
            })
        })
    }

    fn append_to(&self, buf: &mut Vec<u8>) {
        if let Some(bytes) = self.as_contiguous() {
            buf.extend_from_slice(&bytes);
        } else {
            buf.reserve(self.desc.len);
            let bytes = &*self.buffer.obj_bytes();
            self.desc.for_each_segment(true, |range| {
                let start = (range.start + self.start as isize) as usize;
                let end = (range.end + self.start as isize) as usize;
                buf.extend_from_slice(&bytes[start..end]);
            })
        }
    }

    fn contiguous_or_collect<R, F: FnOnce(&[u8]) -> R>(&self, f: F) -> R {
        let borrowed;
        let mut collected;
        let v = if let Some(bytes) = self.as_contiguous() {
            borrowed = bytes;
            &*borrowed
        } else {
            collected = vec![];
            self.append_to(&mut collected);
            &collected
        };
        f(v)
    }

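    /// Copy this view's logical contents into a fresh C-contiguous
    /// `VecBuffer` and rebuild the strides accordingly: the innermost stride
    /// becomes `itemsize`, and each outer stride is the next inner stride
    /// times that dimension's shape.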
    pub fn to_contiguous(&self, vm: &VirtualMachine) -> PyBuffer {
        let mut data = vec![];
        self.append_to(&mut data);

        if self.desc.ndim() == 0 {
            return VecBuffer::from(data)
                .into_ref(&vm.ctx)
                .into_pybuffer_with_descriptor(self.desc.clone());
        }

        let mut dim_desc = self.desc.dim_desc.clone();
        dim_desc.last_mut().unwrap().1 = self.desc.itemsize as isize;
        dim_desc.last_mut().unwrap().2 = 0;
        for i in (0..dim_desc.len() - 1).rev() {
            dim_desc[i].1 = dim_desc[i + 1].1 * dim_desc[i + 1].0 as isize;
            dim_desc[i].2 = 0;
        }

        let desc = BufferDescriptor {
            len: self.desc.len,
            readonly: self.desc.readonly,
            itemsize: self.desc.itemsize,
            format: self.desc.format.clone(),
            dim_desc,
        };

        VecBuffer::from(data)
            .into_ref(&vm.ctx)
            .into_pybuffer_with_descriptor(desc)
    }
}

impl Py<PyMemoryView> {
    fn setitem_by_slice(
        &self,
        slice: &PySlice,
        src: PyObjectRef,
        vm: &VirtualMachine,
    ) -> PyResult<()> {
        if self.desc.ndim() != 1 {
            return Err(vm.new_not_implemented_error("sub-views are not implemented".to_owned()));
        }

        let mut dest = self.new_view();
        dest.init_slice(slice, 0, vm)?;
        dest.init_len();

        if self.is(&src) {
            // assigning a view to a slice of itself is a no-op once the
            // structures are known to match
            return if !is_equiv_structure(&self.desc, &dest.desc) {
                Err(vm.new_value_error(
                    "memoryview assignment: lvalue and rvalue have different structures".to_owned(),
                ))
            } else {
                Ok(())
            };
        };

        let src = if let Some(src) = src.downcast_ref::<PyMemoryView>() {
            if self.buffer.obj.is(&src.buffer.obj) {
                // overlapping views of the same exporter: copy the source out
                // first so the element-wise copy cannot clobber it
                src.to_contiguous(vm)
            } else {
                AsBuffer::as_buffer(src, vm)?
            }
        } else {
            PyBuffer::try_from_object(vm, src)?
        };

        if !is_equiv_structure(&src.desc, &dest.desc) {
            return Err(vm.new_value_error(
                "memoryview assignment: lvalue and rvalue have different structures".to_owned(),
            ));
        }

        let mut bytes_mut = dest.buffer.obj_bytes_mut();
        let src_bytes = src.obj_bytes();
        dest.desc.zip_eq(&src.desc, true, |a_range, b_range| {
            let a_range = (a_range.start + dest.start as isize) as usize
                ..(a_range.end + dest.start as isize) as usize;
            let b_range = b_range.start as usize..b_range.end as usize;
            bytes_mut[a_range].copy_from_slice(&src_bytes[b_range]);
            false
        });

        Ok(())
    }
}

#[pyclass(with(
    Py,
    Hashable,
    Comparable,
    AsBuffer,
    AsMapping,
    AsSequence,
    Constructor,
    Iterable,
    Representable
))]
impl PyMemoryView {
    #[pymethod]
    pub fn release(&self) {
        // compare_exchange guarantees the underlying buffer is released at
        // most once, even if `release()` is called concurrently
        if self.released.compare_exchange(false, true).is_ok() {
            self.buffer.release();
        }
    }

    #[pygetset]
    fn obj(&self, vm: &VirtualMachine) -> PyResult<PyObjectRef> {
        self.try_not_released(vm).map(|_| self.buffer.obj.clone())
    }

    #[pygetset]
    fn nbytes(&self, vm: &VirtualMachine) -> PyResult<usize> {
        self.try_not_released(vm).map(|_| self.desc.len)
    }

    #[pygetset]
    fn readonly(&self, vm: &VirtualMachine) -> PyResult<bool> {
        self.try_not_released(vm).map(|_| self.desc.readonly)
    }

    #[pygetset]
    fn itemsize(&self, vm: &VirtualMachine) -> PyResult<usize> {
        self.try_not_released(vm).map(|_| self.desc.itemsize)
    }

    #[pygetset]
    fn ndim(&self, vm: &VirtualMachine) -> PyResult<usize> {
        self.try_not_released(vm).map(|_| self.desc.ndim())
    }

    #[pygetset]
    fn shape(&self, vm: &VirtualMachine) -> PyResult<PyTupleRef> {
        self.try_not_released(vm)?;
        Ok(vm.ctx.new_tuple(
            self.desc
                .dim_desc
                .iter()
                .map(|(shape, _, _)| shape.to_pyobject(vm))
                .collect(),
        ))
    }

    #[pygetset]
    fn strides(&self, vm: &VirtualMachine) -> PyResult<PyTupleRef> {
        self.try_not_released(vm)?;
        Ok(vm.ctx.new_tuple(
            self.desc
                .dim_desc
                .iter()
                .map(|(_, stride, _)| stride.to_pyobject(vm))
                .collect(),
        ))
    }

    #[pygetset]
    fn suboffsets(&self, vm: &VirtualMachine) -> PyResult<PyTupleRef> {
        self.try_not_released(vm)?;
        Ok(vm.ctx.new_tuple(
            self.desc
                .dim_desc
                .iter()
                .map(|(_, _, suboffset)| suboffset.to_pyobject(vm))
                .collect(),
        ))
    }

    #[pygetset]
    fn format(&self, vm: &VirtualMachine) -> PyResult<PyStr> {
        self.try_not_released(vm)
            .map(|_| PyStr::from(self.desc.format.clone()))
    }

    #[pygetset]
    fn contiguous(&self, vm: &VirtualMachine) -> PyResult<bool> {
        self.try_not_released(vm).map(|_| self.desc.is_contiguous())
    }

    #[pygetset]
    fn c_contiguous(&self, vm: &VirtualMachine) -> PyResult<bool> {
        self.try_not_released(vm).map(|_| self.desc.is_contiguous())
    }

    #[pygetset]
    fn f_contiguous(&self, vm: &VirtualMachine) -> PyResult<bool> {
        // column-major (Fortran) contiguity is currently only recognized for
        // views of at most one dimension
        self.try_not_released(vm)
            .map(|_| self.desc.ndim() <= 1 && self.desc.is_contiguous())
    }

    #[pymethod(magic)]
    fn enter(zelf: PyRef<Self>, vm: &VirtualMachine) -> PyResult<PyRef<Self>> {
        zelf.try_not_released(vm).map(|_| zelf)
    }

    #[pymethod(magic)]
    fn exit(&self, _args: FuncArgs) {
        self.release();
    }

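    /// `memoryview.__getitem__`: an integer index yields one element, a slice
    /// yields a new sub-view, and a tuple of integers indexes a
    /// multi-dimensional view, e.g.:
    ///
    /// ```text
    /// >>> m = memoryview(b"abcd")
    /// >>> m[1], bytes(m[1:3])
    /// (98, b'bc')
    /// ```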
    #[pymethod(magic)]
    fn getitem(zelf: PyRef<Self>, needle: PyObjectRef, vm: &VirtualMachine) -> PyResult {
        zelf.try_not_released(vm)?;
        if zelf.desc.ndim() == 0 {
            // a 0-dim view is addressed only via `...` (yielding the view
            // itself) or `()` (yielding the single element)
            if needle.is(&vm.ctx.ellipsis) {
                return Ok(zelf.into());
            }
            if let Some(tuple) = needle.payload::<PyTuple>() {
                if tuple.is_empty() {
                    return zelf.unpack_single(0, vm);
                }
            }
            return Err(vm.new_type_error("invalid indexing of 0-dim memory".to_owned()));
        }

        match SubscriptNeedle::try_from_object(vm, needle)? {
            SubscriptNeedle::Index(i) => zelf.getitem_by_idx(i, vm),
            SubscriptNeedle::Slice(slice) => zelf.getitem_by_slice(&slice, vm),
            SubscriptNeedle::MultiIndex(indices) => zelf.getitem_by_multi_idx(&indices, vm),
        }
    }

    #[pymethod(magic)]
    fn delitem(&self, _needle: PyObjectRef, vm: &VirtualMachine) -> PyResult<()> {
        if self.desc.readonly {
            return Err(vm.new_type_error("cannot modify read-only memory".to_owned()));
        }
        Err(vm.new_type_error("cannot delete memory".to_owned()))
    }

    #[pymethod(magic)]
    fn len(&self, vm: &VirtualMachine) -> PyResult<usize> {
        self.try_not_released(vm)?;
        Ok(if self.desc.ndim() == 0 {
            1
        } else {
            self.desc.dim_desc[0].0
        })
    }

    #[pymethod]
    fn tobytes(&self, vm: &VirtualMachine) -> PyResult<PyBytesRef> {
        self.try_not_released(vm)?;
        let mut v = vec![];
        self.append_to(&mut v);
        Ok(PyBytes::from(v).into_ref(&vm.ctx))
    }

    #[pymethod]
    fn tolist(&self, vm: &VirtualMachine) -> PyResult<PyListRef> {
        self.try_not_released(vm)?;
        let bytes = self.buffer.obj_bytes();
        if self.desc.ndim() == 0 {
            return Ok(vm.ctx.new_list(vec![format_unpack(
                &self.format_spec,
                &bytes[..self.desc.itemsize],
                vm,
            )?]));
        }
        self._to_list(&bytes, 0, 0, vm)
    }

    #[pymethod]
    fn toreadonly(&self, vm: &VirtualMachine) -> PyResult<PyRef<Self>> {
        self.try_not_released(vm)?;
        let mut other = self.new_view();
        other.desc.readonly = true;
        Ok(other.into_ref(&vm.ctx))
    }

    #[pymethod]
    fn hex(
        &self,
        sep: OptionalArg<Either<PyStrRef, PyBytesRef>>,
        bytes_per_sep: OptionalArg<isize>,
        vm: &VirtualMachine,
    ) -> PyResult<String> {
        self.try_not_released(vm)?;
        self.contiguous_or_collect(|x| bytes_to_hex(x, sep, bytes_per_sep, vm))
    }

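    /// First step of every cast: reinterpret the bytes as a flat 1-D view
    /// with the new format. Fails unless the total byte length is a multiple
    /// of the new itemsize.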
    fn cast_to_1d(&self, format: PyStrRef, vm: &VirtualMachine) -> PyResult<Self> {
        let format_spec = Self::parse_format(format.as_str(), vm)?;
        let itemsize = format_spec.size();
        if self.desc.len % itemsize != 0 {
            return Err(
                vm.new_type_error("memoryview: length is not a multiple of itemsize".to_owned())
            );
        }

        Ok(Self {
            buffer: self.buffer.clone(),
            released: AtomicCell::new(false),
            start: self.start,
            format_spec,
            desc: BufferDescriptor {
                len: self.desc.len,
                readonly: self.desc.readonly,
                itemsize,
                format: format.to_string().into(),
                dim_desc: vec![(self.desc.len / itemsize, itemsize as isize, 0)],
            },
            hash: OnceCell::new(),
        })
    }

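    /// `memoryview.cast(format, shape=...)`: reshape a C-contiguous view,
    /// restricted (as in CPython) to 1D -> ND or ND -> 1D, e.g.:
    ///
    /// ```text
    /// >>> m = memoryview(bytearray(8)).cast('B', shape=[2, 4])
    /// >>> m.shape, m.strides
    /// ((2, 4), (4, 1))
    /// ```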
    #[pymethod]
    fn cast(&self, args: CastArgs, vm: &VirtualMachine) -> PyResult<PyRef<Self>> {
        self.try_not_released(vm)?;
        if !self.desc.is_contiguous() {
            return Err(vm.new_type_error(
                "memoryview: casts are restricted to C-contiguous views".to_owned(),
            ));
        }

        let CastArgs { format, shape } = args;

        if let OptionalArg::Present(shape) = shape {
            if self.desc.is_zero_in_shape() {
                return Err(vm.new_type_error(
                    "memoryview: cannot cast view with zeros in shape or strides".to_owned(),
                ));
            }

            let tup;
            let list;
            let list_borrow;
            let shape = match shape {
                Either::A(shape) => {
                    tup = shape;
                    tup.as_slice()
                }
                Either::B(shape) => {
                    list = shape;
                    list_borrow = list.borrow_vec();
                    &list_borrow
                }
            };

            let shape_ndim = shape.len();
            if self.desc.ndim() != 1 && shape_ndim != 1 {
                return Err(
                    vm.new_type_error("memoryview: cast must be 1D -> ND or ND -> 1D".to_owned())
                );
            }

            let mut other = self.cast_to_1d(format, vm)?;
            let itemsize = other.desc.itemsize;

            // an empty shape produces a 0-dim view over a single element
            if shape_ndim == 0 {
                other.desc.dim_desc = vec![];
                other.desc.len = itemsize;
                return Ok(other.into_ref(&vm.ctx));
            }

            let mut product_shape = itemsize;
            let mut dim_descriptor = Vec::with_capacity(shape_ndim);

            for x in shape {
                let x = usize::try_from_borrowed_object(vm, x)?;

                if x > isize::MAX as usize / product_shape {
                    return Err(vm.new_value_error(
                        "memoryview.cast(): product(shape) > SSIZE_MAX".to_owned(),
                    ));
                }
                product_shape *= x;
                dim_descriptor.push((x, 0, 0));
            }

            // C-contiguous strides: innermost is itemsize, each outer stride
            // is the next inner stride times that dimension's shape
            dim_descriptor.last_mut().unwrap().1 = itemsize as isize;
            for i in (0..dim_descriptor.len() - 1).rev() {
                dim_descriptor[i].1 = dim_descriptor[i + 1].1 * dim_descriptor[i + 1].0 as isize;
            }

            if product_shape != other.desc.len {
                return Err(vm.new_type_error(
                    "memoryview: product(shape) * itemsize != buffer size".to_owned(),
                ));
            }

            other.desc.dim_desc = dim_descriptor;

            Ok(other.into_ref(&vm.ctx))
        } else {
            Ok(self.cast_to_1d(format, vm)?.into_ref(&vm.ctx))
        }
    }
}

#[pyclass]
impl Py<PyMemoryView> {
    #[pymethod(magic)]
    fn setitem(
        &self,
        needle: PyObjectRef,
        value: PyObjectRef,
        vm: &VirtualMachine,
    ) -> PyResult<()> {
        self.try_not_released(vm)?;
        if self.desc.readonly {
            return Err(vm.new_type_error("cannot modify read-only memory".to_owned()));
        }
        if value.is(&vm.ctx.none) {
            return Err(vm.new_type_error("cannot delete memory".to_owned()));
        }

        if self.desc.ndim() == 0 {
            // as in `getitem`, a 0-dim view is addressed only via `...` or `()`
            if needle.is(&vm.ctx.ellipsis) {
                return self.pack_single(0, value, vm);
            } else if let Some(tuple) = needle.payload::<PyTuple>() {
                if tuple.is_empty() {
                    return self.pack_single(0, value, vm);
                }
            }
            return Err(vm.new_type_error("invalid indexing of 0-dim memory".to_owned()));
        }
        match SubscriptNeedle::try_from_object(vm, needle)? {
            SubscriptNeedle::Index(i) => self.setitem_by_idx(i, value, vm),
            SubscriptNeedle::Slice(slice) => self.setitem_by_slice(&slice, value, vm),
            SubscriptNeedle::MultiIndex(indices) => self.setitem_by_multi_idx(&indices, value, vm),
        }
    }

    #[pymethod(magic)]
    fn reduce_ex(&self, _proto: usize, vm: &VirtualMachine) -> PyResult {
        self.reduce(vm)
    }

    #[pymethod(magic)]
    fn reduce(&self, vm: &VirtualMachine) -> PyResult {
        Err(vm.new_type_error("cannot pickle 'memoryview' object".to_owned()))
    }
}

#[derive(FromArgs)]
struct CastArgs {
    #[pyarg(any)]
    format: PyStrRef,
    #[pyarg(any, optional)]
    shape: OptionalArg<Either<PyTupleRef, PyListRef>>,
}

enum SubscriptNeedle {
    Index(isize),
    Slice(PyRef<PySlice>),
    MultiIndex(Vec<isize>),
}

impl TryFromObject for SubscriptNeedle {
    fn try_from_object(vm: &VirtualMachine, obj: PyObjectRef) -> PyResult<Self> {
        if let Some(i) = obj.payload::<PyInt>() {
            Ok(Self::Index(i.try_to_primitive(vm)?))
        } else if obj.payload_is::<PySlice>() {
            Ok(Self::Slice(unsafe { obj.downcast_unchecked::<PySlice>() }))
        } else if let Ok(i) = obj.try_index(vm) {
            Ok(Self::Index(i.try_to_primitive(vm)?))
        } else {
            if let Some(tuple) = obj.payload::<PyTuple>() {
                if tuple.iter().all(|x| x.payload_is::<PyInt>()) {
                    let v = tuple
                        .iter()
                        .map(|x| {
                            unsafe { x.downcast_unchecked_ref::<PyInt>() }
                                .try_to_primitive::<isize>(vm)
                        })
                        .try_collect()?;
                    return Ok(Self::MultiIndex(v));
                } else if tuple.iter().all(|x| x.payload_is::<PySlice>()) {
                    return Err(vm.new_not_implemented_error(
                        "multi-dimensional slicing is not implemented".to_owned(),
                    ));
                }
            }
            Err(vm.new_type_error("memoryview: invalid slice key".to_owned()))
        }
    }
}

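// The buffer-protocol vtable of a memoryview forwards to its inner buffer,
// so exporting a buffer from a view keeps the original exporter pinned
// instead of stacking a new export chain on top of the view.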
static BUFFER_METHODS: BufferMethods = BufferMethods {
    obj_bytes: |buffer| buffer.obj_as::<PyMemoryView>().obj_bytes(),
    obj_bytes_mut: |buffer| buffer.obj_as::<PyMemoryView>().obj_bytes_mut(),
    release: |buffer| buffer.obj_as::<PyMemoryView>().buffer.release(),
    retain: |buffer| buffer.obj_as::<PyMemoryView>().buffer.retain(),
};

impl AsBuffer for PyMemoryView {
    fn as_buffer(zelf: &Py<Self>, vm: &VirtualMachine) -> PyResult<PyBuffer> {
        if zelf.released.load() {
            Err(vm.new_value_error("operation forbidden on released memoryview object".to_owned()))
        } else {
            Ok(PyBuffer::new(
                zelf.to_owned().into(),
                zelf.desc.clone(),
                &BUFFER_METHODS,
            ))
        }
    }
}

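// If `release()` already ran, the inner buffer was released there and must
// not be released a second time; otherwise dropping the `ManuallyDrop`
// performs the regular release.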
impl Drop for PyMemoryView {
    fn drop(&mut self) {
        if self.released.load() {
            unsafe { self.buffer.drop_without_release() };
        } else {
            unsafe { ManuallyDrop::drop(&mut self.buffer) };
        }
    }
}

impl AsMapping for PyMemoryView {
    fn as_mapping() -> &'static PyMappingMethods {
        static AS_MAPPING: PyMappingMethods = PyMappingMethods {
            length: atomic_func!(|mapping, vm| PyMemoryView::mapping_downcast(mapping).len(vm)),
            subscript: atomic_func!(|mapping, needle, vm| {
                let zelf = PyMemoryView::mapping_downcast(mapping);
                PyMemoryView::getitem(zelf.to_owned(), needle.to_owned(), vm)
            }),
            ass_subscript: atomic_func!(|mapping, needle, value, vm| {
                let zelf = PyMemoryView::mapping_downcast(mapping);
                if let Some(value) = value {
                    zelf.setitem(needle.to_owned(), value, vm)
                } else {
                    Err(vm.new_type_error("cannot delete memory".to_owned()))
                }
            }),
        };
        &AS_MAPPING
    }
}

impl AsSequence for PyMemoryView {
    fn as_sequence() -> &'static PySequenceMethods {
        static AS_SEQUENCE: Lazy<PySequenceMethods> = Lazy::new(|| PySequenceMethods {
            length: atomic_func!(|seq, vm| {
                let zelf = PyMemoryView::sequence_downcast(seq);
                zelf.try_not_released(vm)?;
                zelf.len(vm)
            }),
            item: atomic_func!(|seq, i, vm| {
                let zelf = PyMemoryView::sequence_downcast(seq);
                zelf.try_not_released(vm)?;
                zelf.getitem_by_idx(i, vm)
            }),
            ..PySequenceMethods::NOT_IMPLEMENTED
        });
        &AS_SEQUENCE
    }
}

impl Comparable for PyMemoryView {
    fn cmp(
        zelf: &Py<Self>,
        other: &PyObject,
        op: PyComparisonOp,
        vm: &VirtualMachine,
    ) -> PyResult<PyComparisonValue> {
        match op {
            PyComparisonOp::Ne => {
                Self::eq(zelf, other, vm).map(|x| PyComparisonValue::Implemented(!x))
            }
            PyComparisonOp::Eq => Self::eq(zelf, other, vm).map(PyComparisonValue::Implemented),
            _ => Err(vm.new_type_error(format!(
                "'{}' not supported between instances of '{}' and '{}'",
                op.operator_token(),
                zelf.class().name(),
                other.class().name()
            ))),
        }
    }
}

impl Hashable for PyMemoryView {
    fn hash(zelf: &Py<Self>, vm: &VirtualMachine) -> PyResult<PyHash> {
        zelf.hash
            .get_or_try_init(|| {
                zelf.try_not_released(vm)?;
                if !zelf.desc.readonly {
                    return Err(
                        vm.new_value_error("cannot hash writable memoryview object".to_owned())
                    );
                }
                Ok(zelf.contiguous_or_collect(|bytes| vm.state.hash_secret.hash_bytes(bytes)))
            })
            .copied()
    }
}

impl PyPayload for PyMemoryView {
    fn class(ctx: &Context) -> &'static Py<PyType> {
        ctx.types.memoryview_type
    }
}

impl Representable for PyMemoryView {
    #[inline]
    fn repr_str(zelf: &Py<Self>, _vm: &VirtualMachine) -> PyResult<String> {
        let repr = if zelf.released.load() {
            format!("<released memory at {:#x}>", zelf.get_id())
        } else {
            format!("<memory at {:#x}>", zelf.get_id())
        };
        Ok(repr)
    }
}

pub(crate) fn init(ctx: &Context) {
    PyMemoryView::extend_class(ctx, ctx.types.memoryview_type);
    PyMemoryViewIterator::extend_class(ctx, ctx.types.memoryviewiterator_type);
}

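// A single-element unpack result is returned as the bare value rather than a
// 1-tuple, which is why `m[0]` yields e.g. an int instead of `(int,)`.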
fn format_unpack(
    format_spec: &FormatSpec,
    bytes: &[u8],
    vm: &VirtualMachine,
) -> PyResult<PyObjectRef> {
    format_spec.unpack(bytes, vm).map(|x| {
        if x.len() == 1 {
            x.fast_getitem(0)
        } else {
            x.into()
        }
    })
}

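// Two shapes are equivalent when they have the same number of dimensions and
// agree dimension by dimension; a zero shape means both buffers are empty,
// so any remaining dimensions cannot affect the contents.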
fn is_equiv_shape(a: &BufferDescriptor, b: &BufferDescriptor) -> bool {
    if a.ndim() != b.ndim() {
        return false;
    }

    let a_iter = a.dim_desc.iter().map(|x| x.0);
    let b_iter = b.dim_desc.iter().map(|x| x.0);
    for (a_shape, b_shape) in a_iter.zip(b_iter) {
        if a_shape != b_shape {
            return false;
        }
        if a_shape == 0 {
            break;
        }
    }
    true
}

fn is_equiv_format(a: &BufferDescriptor, b: &BufferDescriptor) -> bool {
    a.itemsize == b.itemsize && a.format == b.format
}

fn is_equiv_structure(a: &BufferDescriptor, b: &BufferDescriptor) -> bool {
    is_equiv_format(a, b) && is_equiv_shape(a, b)
}

impl Iterable for PyMemoryView {
    fn iter(zelf: PyRef<Self>, vm: &VirtualMachine) -> PyResult {
        Ok(PyMemoryViewIterator {
            internal: PyMutex::new(PositionIterInternal::new(zelf, 0)),
        }
        .into_pyobject(vm))
    }
}

#[pyclass(module = false, name = "memory_iterator")]
#[derive(Debug, Traverse)]
pub struct PyMemoryViewIterator {
    internal: PyMutex<PositionIterInternal<PyRef<PyMemoryView>>>,
}

impl PyPayload for PyMemoryViewIterator {
    fn class(ctx: &Context) -> &'static Py<PyType> {
        ctx.types.memoryviewiterator_type
    }
}

#[pyclass(with(Unconstructible, IterNext, Iterable))]
impl PyMemoryViewIterator {
    #[pymethod(magic)]
    fn reduce(&self, vm: &VirtualMachine) -> PyTupleRef {
        self.internal
            .lock()
            .builtins_iter_reduce(|x| x.clone().into(), vm)
    }
}
impl Unconstructible for PyMemoryViewIterator {}

impl SelfIter for PyMemoryViewIterator {}
impl IterNext for PyMemoryViewIterator {
    fn next(zelf: &Py<Self>, vm: &VirtualMachine) -> PyResult<PyIterReturn> {
        zelf.internal.lock().next(|mv, pos| {
            let len = mv.len(vm)?;
            Ok(if pos >= len {
                PyIterReturn::StopIteration(None)
            } else {
                PyIterReturn::Return(mv.getitem_by_idx(pos.try_into().unwrap(), vm)?)
            })
        })
    }
}