Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions .python-version
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
3.11
748 changes: 743 additions & 5 deletions poetry.lock

Large diffs are not rendered by default.

4 changes: 3 additions & 1 deletion pyclip/__init__.py
Original file line number Diff line number Diff line change
@@ -1,2 +1,4 @@

from .core.video import Video
from .core.video import Video

from .operations import Trim
1 change: 1 addition & 0 deletions pyclip/core/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
from .video import Video
95 changes: 82 additions & 13 deletions pyclip/core/video.py
Original file line number Diff line number Diff line change
@@ -1,33 +1,102 @@
from __future__ import annotations
from functools import reduce
import math
import numbers

from pathlib import Path
from torch import Tensor
import torch

class Model:
pass
from ..utils.enums import TimeUnits

class Video(Model):
from pydantic import BaseModel, PositiveFloat

def trim(self, start: float, end: float | None = None, unit: str = "ms") -> Video:

class Video(BaseModel, arbitrary_types_allowed=True):
    """A video clip backed by torch tensors.

    Attributes:
        clip: Frame data, shape (frames, channels, height, width).
        fps: Playback speed in frames per second.
        audio: Optional audio samples, shape (samples, audio_channels).
        sampling_rate: Audio samples per second; None when there is no audio.
    """

    clip: Tensor
    fps: float
    audio: Tensor | None = None
    sampling_rate: int | None = None

    @classmethod
    def rand(cls) -> Video:
        """Build a random 4-second, 30 fps video with stereo audio.

        Intended for tests and demos.

        Returns:
            A fully-populated random Video instance.
        """
        fps = 30
        sample_rate = 44100

        frames, channels, height, width = 120, 3, 170, 200

        # Number of audio samples covering the same duration as the clip.
        samples = math.floor(frames / fps * sample_rate)
        audio_channels = 2

        return cls(
            clip=torch.rand([frames, channels, height, width]),
            fps=fps,
            audio=torch.rand([samples, audio_channels]),
            sampling_rate=sample_rate,
        )

    @property
    def duration(self) -> float:
        """Duration of the video in milliseconds."""
        return self.frames / self.fps * 1000

    @property
    def frames(self) -> int:
        """Number of frames in the clip."""
        return self.clip.shape[0]

    @property
    def channels(self) -> int:
        """Number of color channels per frame."""
        return self.clip.shape[1]

    @property
    def height(self) -> int:
        """Frame height in pixels."""
        return self.clip.shape[2]

    @property
    def width(self) -> int:
        """Frame width in pixels."""
        return self.clip.shape[3]

    def trim(self, start: float, end: float | None = None, step: float | None = None, unit: str = "ms") -> Video:
        """Return a clip with the content between times `start` and `end`.

        Args:
            start: Start time (inclusive), expressed in `unit`.
            end: End time (exclusive); None keeps everything to the end.
            step: Optional stride between kept frames.
            unit: "ms" for milliseconds or "f" for frame indices.

        Returns:
            A new trimmed Video; the original is left untouched.
        """
        # Local import avoids a circular dependency with pyclip.operations.
        from ..operations import Trim

        op = Trim(start=start, end=end, step=step, unit=unit)
        return op(self)

    def __getitem__(self, index: int | slice) -> Video:
        """Frame-based indexing: `video[a:b:c]` slices, `video[i]` is a one-frame clip."""
        if isinstance(index, slice):
            # An omitted slice start means "from the first frame" — Trim
            # requires a non-negative start, so map None to 0.
            start = 0 if index.start is None else index.start
            return self.trim(start=start, end=index.stop, step=index.step, unit=TimeUnits.frame)
        if isinstance(index, numbers.Integral):
            return self.trim(start=index, end=index + 1, unit=TimeUnits.frame)
        raise TypeError(f"Video indices must be integers or slices, not {type(index).__name__}")

    def eq(self, other: Video) -> bool:
        """Exact equality on clip, audio, fps and sampling rate."""
        from ..operations import Equality

        op = Equality()
        return op(self, other)

    def __eq__(self, other: object) -> bool:
        # NOTE: defining __eq__ leaves the class unhashable (Python sets __hash__ to None).
        return self.eq(other)

    def apply(self, transforms: list) -> Video:
        """Apply a pipeline of operations (callables Video -> Video) left to right."""
        return reduce(lambda video, op: op(video), transforms, self)

    def mute(self, channels: int | list[int] | None = None):
        """Silence the given audio channel(s), or all of them when None. Not implemented yet."""
        raise NotImplementedError

    def resize(self, **kwargs):
        """Resize the video frames. Not implemented yet."""
        raise NotImplementedError

    def upscale(self, **kwargs):
        """Upscale the video frames. Not implemented yet."""
        raise NotImplementedError

    def open(self, path: str | Path):
        """Load a video from `path`. Not implemented yet."""
        raise NotImplementedError

    def save(self, path: str | Path):
        """Write the video to `path`. Not implemented yet."""
        raise NotImplementedError

    def __repr__(self) -> str:
        return f"Video(shape={list(self.clip.shape)}, fps={self.fps}, audio={list(self.audio.shape) if self.audio is not None else None}, sampling_rate={self.sampling_rate})"

2 changes: 2 additions & 0 deletions pyclip/operations/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
from .trim import Trim
from .eq import Equality
17 changes: 17 additions & 0 deletions pyclip/operations/eq.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
from dataclasses import dataclass
from typing import Any, TYPE_CHECKING

from ..utils.enums import TimeUnits
from ..core import Video


class Equality:
    """Operation that checks whether an arbitrary number of videos are exactly equal."""

    def __call__(self, *videos: Video) -> bool:
        """Return True when every argument is a Video equal to the first one.

        Any non-Video argument makes the result False; zero or one argument
        is trivially equal.
        """
        if any(not isinstance(video, Video) for video in videos):
            return False

        return all(self._is_equal(videos[0], video) for video in videos)

    def _is_equal(self, video1: Video, video2: Video) -> bool:
        """Field-by-field comparison of two videos, returning a plain bool.

        Handles missing audio and mismatched tensor shapes instead of raising.
        """
        if video1.fps != video2.fps or video1.sampling_rate != video2.sampling_rate:
            return False
        # Differently-shaped clips can never be equal (and would make .eq() raise).
        if video1.clip.shape != video2.clip.shape:
            return False
        if not bool(video1.clip.eq(video2.clip).all()):
            return False
        # Audio is optional: both absent counts as equal, exactly one absent does not.
        if video1.audio is None or video2.audio is None:
            return video1.audio is None and video2.audio is None
        if video1.audio.shape != video2.audio.shape:
            return False
        return bool(video1.audio.eq(video2.audio).all())
66 changes: 66 additions & 0 deletions pyclip/operations/trim.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,66 @@
from typing_extensions import Self

from typing import Any, Union, Optional
from pydantic import BaseModel, NonNegativeFloat, field_validator, model_validator, validator
from ..utils.enums import TimeUnits
from ..core import Video

class Trim(BaseModel):
    """Trim a video to [start, end) with an optional step, in frames or milliseconds."""

    start: NonNegativeFloat                    # inclusive start, expressed in `unit`
    end: Optional[NonNegativeFloat] = None     # exclusive end; None means "to the end"
    step: Optional[NonNegativeFloat] = None    # stride between kept frames; None keeps every frame
    unit: TimeUnits = TimeUnits.milliseconds

    @model_validator(mode='after')
    def validate_end_not_before_start(self) -> Self:
        """Ensures that end is not before start."""
        if self.end is not None and self.end < self.start:
            raise ValueError("End time cannot be before start time.")
        return self

    def _convert_to_frames(self, video: Video, value: float | None) -> int | None:
        """Convert `value` (interpreted per `self.unit`) to a whole frame count.

        Args:
            video (Video): The video from which the frame rate is obtained.
            value (float | None): A frame index or a time in milliseconds.

        Returns:
            int | None: The value as frames, or None when `value` is None.
        """
        if value is None:
            return None
        if self.unit == TimeUnits.frame:
            return int(value)
        # milliseconds -> frames, floored to a whole frame.
        return int(value * video.fps // 1000)

    def _audio_slice(self, video: Video, start: int, end: Optional[int], step: Optional[int]) -> slice:
        """Map frame indices onto audio-sample indices via the sampling rate.

        Args:
            video (Video): Source video; must carry a sampling_rate.
            start (int): Starting frame index.
            end (Optional[int]): Ending frame index, or None.
            step (Optional[int]): Frame stride, or None.

        Returns:
            slice: A slice over the audio sample axis covering the same time span.
        """
        rate = video.sampling_rate
        audio_start = int(start / video.fps * rate)
        audio_end = int(end / video.fps * rate) if end is not None else None
        # Scale the frame stride into sample space so strided trims keep A/V in sync
        # (using the frame step directly would index samples at frame granularity).
        audio_step = max(1, round(step / video.fps * rate)) if step is not None else None
        return slice(audio_start, audio_end, audio_step)

    def __call__(self, video: Video) -> Video:
        """Trims the video based on the specified unit (frames or milliseconds).

        Args:
            video (Video): The video to be trimmed.

        Returns:
            Video: The trimmed video; audio, when present, is trimmed in sync.
        """
        start, end, step = (self._convert_to_frames(video, v) for v in (self.start, self.end, self.step))
        clip = video.clip[slice(start, end, step)]

        # Videos without audio (or without a sampling rate) are trimmed video-only.
        audio = None
        if video.audio is not None and video.sampling_rate is not None:
            audio = video.audio[self._audio_slice(video, start, end, step)]

        return Video(clip=clip, fps=video.fps, audio=audio, sampling_rate=video.sampling_rate)
6 changes: 6 additions & 0 deletions pyclip/utils/enums.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
from enum import Enum


class TimeUnits(str, Enum):
    """Units in which video trim times can be expressed."""
    milliseconds = "ms"  # wall-clock time in milliseconds
    frame = "f"          # raw frame indices
6 changes: 5 additions & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -6,8 +6,12 @@ authors = ["Pedro Valois <vaz.valois@hotmail.com>"]
readme = "README.md"

[tool.poetry.dependencies]
python = "^3.11"
python = ">=3.10,<3.13"
pytest = "^7.4.2"
ipython = "^8.16.1"
torch = "^2.1.0"
pydantic = "^2.4.2"
numpy = "^1.26.1"


[build-system]
Expand Down
7 changes: 7 additions & 0 deletions tests/conftest.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
import pyclip
import pytest

@pytest.fixture
def mock_video() -> pyclip.Video:
    """Provide a randomly-generated Video instance for the test suite."""
    video = pyclip.Video.rand()
    return video
84 changes: 84 additions & 0 deletions tests/test_flip.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,84 @@
"""Tests for the rotate and flip methods of the Video class.

References:
- https://kornia.readthedocs.io/en/latest/geometry.transform.html#kornia.geometry.transform.hflip
- https://kornia.readthedocs.io/en/latest/geometry.transform.html#kornia.geometry.transform.vflip

"""

import pyclip
import pytest

def test_flip_horizontal(mock_video: pyclip.Video):
    """A horizontal flip changes the video; a second flip restores it."""
    once = mock_video.fliph()
    assert once != mock_video

    twice = once.fliph()
    assert twice == mock_video

def test_flip_vertical(mock_video: pyclip.Video):
    """A vertical flip changes the video; a second flip restores it."""
    once = mock_video.flipv()
    assert once != mock_video

    twice = once.flipv()
    assert twice == mock_video

def test_flip_both(mock_video: pyclip.Video):
    """Flipping both axes at once matches flipping each axis, in either order."""
    both_at_once = mock_video.flip(horizontal=True, vertical=True)
    v_then_h = mock_video.flipv().fliph()
    h_then_v = mock_video.fliph().flipv()
    assert both_at_once == v_then_h
    assert v_then_h == h_then_v

def test_flip_without_args(mock_video: pyclip.Video):
    """flip() called with no direction selected must raise ValueError."""
    with pytest.raises(ValueError):
        mock_video.flip()

def test_flip_audio_sync(mock_video: pyclip.Video):
    """A horizontal flip must leave the audio track untouched."""
    flipped = mock_video.fliph()
    # Tensor `==` is elementwise; asserting a multi-element result tensor is
    # ambiguous and raises RuntimeError, so reduce it with .all().
    assert (flipped.audio == mock_video.audio).all()

def test_flip_horizontal_and_vertical(mock_video: pyclip.Video):
    """Sequential h-then-v flips match a single combined flip."""
    sequential = mock_video.fliph().flipv()
    combined = mock_video.flip(horizontal=True, vertical=True)
    assert sequential == combined

def test_flip_vertical_audio_sync(mock_video: pyclip.Video):
    """A vertical flip must leave the audio track untouched."""
    flipped = mock_video.flipv()
    # Tensor `==` is elementwise; asserting a multi-element result tensor is
    # ambiguous and raises RuntimeError, so reduce it with .all().
    assert (flipped.audio == mock_video.audio).all()

def test_flip_invalid_arguments(mock_video: pyclip.Video):
    """A non-boolean direction flag is rejected with TypeError."""
    with pytest.raises(TypeError):
        mock_video.flip(horizontal="True", vertical=True)

def test_fliph_method_vs_flip_function(mock_video: pyclip.Video):
    """fliph() is shorthand for flip(horizontal=True)."""
    via_method = mock_video.fliph()
    via_flag = mock_video.flip(horizontal=True)
    assert via_method == via_flag

def test_flipv_method_vs_flip_function(mock_video: pyclip.Video):
    """flipv() is shorthand for flip(vertical=True)."""
    via_method = mock_video.flipv()
    via_flag = mock_video.flip(vertical=True)
    assert via_method == via_flag

def test_flip_with_both_false(mock_video: pyclip.Video):
    """flip() with both directions disabled returns the original video."""
    unchanged = mock_video.flip(horizontal=False, vertical=False)
    assert unchanged == mock_video

def test_flip_idempotence(mock_video: pyclip.Video):
    """Flipping twice in any direction returns the original video."""
    double_both = mock_video.flip(horizontal=True, vertical=True).flip(horizontal=True, vertical=True)
    double_h = mock_video.fliph().fliph()
    double_v = mock_video.flipv().flipv()
    assert double_both == double_h
    assert double_h == double_v
    assert double_v == mock_video

def test_flip_chain_operations(mock_video: pyclip.Video):
    """An h-v-h flip chain reduces to a single vertical flip."""
    chained = mock_video.fliph().flipv().fliph()
    assert chained == mock_video.flipv()

def test_flip_no_audio(mock_video: pyclip.Video):
    """Flipping a video without audio keeps the audio absent."""
    # Assuming the Video class has a method to remove audio
    silent = mock_video.remove_audio()
    flipped = silent.fliph()
    assert flipped.audio is None
Loading