App은 함께 배포되는 함수들과 클래스들의 묶음입니다.
import modal
app = modal.App()
하나 이상의 Function을 묶어 관리하게 되며, 하나의 namespace를 구성하여 서로 다른 App 사이에는 이름이 충돌되지 않습니다.
App의 구분
Ephemeral Apps
modal run 명령어 또는 app.run( ) 메소드로 실행되는 App입니다. 임시로 만들어지는 App이며, 스크립트가 실행되는 동안만 지속됩니다. 프로세스가 종료되거나 클라이언트가 더 이상 서버와 연결되어 있지 않다고 판단되면 App도 종료됩니다.
def main():
# ...
with app.run():
some_modal_function.remote()
이런 방식으로 App을 실행하면 로그나 진행바 등이 나타나지는 않습니다. 만약 이를 원한다면, modal.enable_output으로 명시해주어야 합니다.
def main():
# ...
with modal.enable_output():
with app.run():
some_modal_function.remote()
Deployed Apps
modal deploy 명령어로 실행되는 App입니다. modal app stop 명령어 혹은 대시보드를 통해 직접 정지하지 않는 한, 반영구적으로 클라우드에 존재하게 됩니다.
속성값
| 속성값 | 기능 | 입력값 | 기본값 |
| name | 이름 | str | None |
| tags | 메타데이터 | dict[str, str] | None |
| image | 이미지 파일 | _Image | None |
| secrets | secret | _Secret | [ ] |
| volumes | volume | dict[Union[str, PurePosixPath], _Volume] | { } |
| include_source | Function 소스 파일 포함 여부 (기본 설정값) | bool | True |
def __init__(
self,
name: Optional[str] = None,
*,
tags: Optional[dict[str, str]] = None, # Additional metadata to set on the App
image: Optional[_Image] = None, # Default Image for the App (otherwise default to `modal.Image.debian_slim()`)
secrets: Sequence[_Secret] = [], # Secrets to add for all Functions in the App
volumes: dict[Union[str, PurePosixPath], _Volume] = {}, # Volume mounts to use for all Functions
include_source: bool = True, # Default configuration for adding Function source file(s) to the Modal container
) -> None:
메소드
| 메소드 | 기능 | 반환값 |
| name( ) | 사용자 지정 이름 반환 | str |
| app_id( ) | app id 반환 | str |
| description( ) | name 속성값 반환. 만약 없다면, descriptive identifier를 대신 반환 | str |
| lookup( ) | 찾는 이름에 해당하는 App을 반환. 없다면 (create_if_missing=True인 경우) 그 이름의 App을 새로 생성 | _App |
| run( ) | ephemeral App을 실행 | AsyncGenerator |
| deploy( ) | deployed App을 실행 | typing_extensions.Self |
| local_entrypoint( ) | modal run을 통해 App을 실행할 때, 제일 먼저 실행되는 함수를 지정 [@ 데코레이션] | Callable |
| function( ) | modal Function 정의 [@ 데코레이션] | _FunctionDecoratorType |
| cls( ) | modal Cls (클래스) 정의 [@ 데코레이션] | Callable |
| include( ) | 다른 App의 객체들(Function 등)을 사용할 수 있도록 포함 | typing_extensions.Self |
| set_tags( ) | 메타데이터 추가 | None |
| get_tags( ) | 메타데이터 가져오기 | dict[str, str] |
lookup( )
@staticmethod
def lookup(
name: str,
*,
client: Optional[_Client] = None,
environment_name: Optional[str] = None,
create_if_missing: bool = False,
) -> "_App":
run( )
@contextmanager
def run(
self,
*,
client: Optional[_Client] = None,
detach: bool = False,
interactive: bool = False,
environment_name: Optional[str] = None,
) -> AsyncGenerator["_App", None]:
Function의 호출은 모두 run( ) 안에서 실행되어야 합니다.
with app.run():
some_modal_function.remote()
만약 Function의 remote( )가 전역 공간에 위치한다면, 스크립트 파일이 실행될 때 뿐만 아니라 Modal이 컨테이너에서 이 스크립트 파일을 import 할 때도 실행되기 때문에 문제가 될 수 있습니다.
그래서 전역 공간에 배치하고자 한다면, 이런 식으로 스크립트가 실행될 때만 remote( )가 실행되도록 조치해야 합니다.
if __name__ == "__main__":
with app.run():
some_modal_function.remote()
deploy( )
def deploy(
self,
*,
name: Optional[str] = None, # Name for the deployment, overriding any set on the App
environment_name: Optional[str] = None, # Environment to deploy the App in
tag: str = "", # Optional metadata that is specific to this deployment
client: Optional[_Client] = None, # Alternate client to use for communication with the server
) -> typing_extensions.Self:
마찬가지로 deploy( )를 전역 공간에 배치하고자 한다면, 이런 식으로 스크립트가 실행될 때만 deploy( )가 실행되도록 조치해야 합니다.
app = App("my-app")
with modal.enable_output():
app.deploy()
local_entrypoint( )
def local_entrypoint(
self,
_warn_parentheses_missing: Any = None,
*,
name: Optional[str] = None
) -> Callable[[Callable[..., Any]], _LocalEntrypoint]:
사용할 때는 데코레이션으로 표기합니다.
@app.local_entrypoint()
def main():
some_modal_function.remote()
function( )
@warn_on_renamed_autoscaler_settings
def function(
self,
*,
image: Optional[_Image] = None, # The image to run as the container for the function
schedule: Optional[Schedule] = None, # An optional Modal Schedule for the function
env: Optional[dict[str, Optional[str]]] = None, # Environment variables to set in the container
secrets: Optional[Collection[_Secret]] = None, # Secrets to inject into the container as environment variables
gpu: Union[
GPU_T, list[GPU_T]
] = None, # GPU request as string ("any", "T4", ...), object (`modal.GPU.A100()`, ...), or a list of either
serialized: bool = False, # Whether to send the function over using cloudpickle.
network_file_systems: dict[
Union[str, PurePosixPath], _NetworkFileSystem
] = {}, # Mountpoints for Modal NetworkFileSystems
volumes: dict[
Union[str, PurePosixPath], Union[_Volume, _CloudBucketMount]
] = {}, # Mount points for Modal Volumes & CloudBucketMounts
# Specify, in fractional CPU cores, how many CPU cores to request.
# Or, pass (request, limit) to additionally specify a hard limit in fractional CPU cores.
# CPU throttling will prevent a container from exceeding its specified limit.
cpu: Optional[Union[float, tuple[float, float]]] = None,
# Specify, in MiB, a memory request which is the minimum memory required.
# Or, pass (request, limit) to additionally specify a hard limit in MiB.
memory: Optional[Union[int, tuple[int, int]]] = None,
ephemeral_disk: Optional[int] = None, # Specify, in MiB, the ephemeral disk size for the Function.
min_containers: Optional[int] = None, # Minimum number of containers to keep warm, even when Function is idle.
max_containers: Optional[int] = None, # Limit on the number of containers that can be concurrently running.
buffer_containers: Optional[int] = None, # Number of additional idle containers to maintain under active load.
scaledown_window: Optional[int] = None, # Max time (in seconds) a container can remain idle while scaling down.
proxy: Optional[_Proxy] = None, # Reference to a Modal Proxy to use in front of this function.
retries: Optional[Union[int, Retries]] = None, # Number of times to retry each input in case of failure.
timeout: int = 300, # Maximum execution time for inputs and startup time in seconds.
startup_timeout: Optional[int] = None, # Maximum startup time in seconds with higher precedence than `timeout`.
name: Optional[str] = None, # Sets the Modal name of the function within the app
is_generator: Optional[
bool
] = None, # Set this to True if it's a non-generator function returning a [sync/async] generator object
cloud: Optional[str] = None, # Cloud provider to run the function on. Possible values are aws, gcp, oci, auto.
region: Optional[Union[str, Sequence[str]]] = None, # Region or regions to run the function on.
nonpreemptible: bool = False, # Whether to run the function on a nonpreemptible instance.
enable_memory_snapshot: bool = False, # Enable memory checkpointing for faster cold starts.
block_network: bool = False, # Whether to block network access
restrict_modal_access: bool = False, # Whether to allow this function access to other Modal resources
single_use_containers: bool = False, # When True, containers will shut down after handling a single input
i6pn: Optional[bool] = None, # Whether to enable IPv6 container networking within the region.
# Whether the file or directory containing the Function's source should automatically be included
# in the container. When unset, falls back to the App-level configuration, or is otherwise True by default.
include_source: Optional[bool] = None,
experimental_options: Optional[dict[str, Any]] = None,
# Parameters below here are experimental. Use with caution!
_experimental_proxy_ip: Optional[str] = None, # IP address of proxy
_experimental_custom_scaling_factor: Optional[float] = None, # Custom scaling factor
_experimental_restrict_output: bool = False, # Don't use pickle for return values
# Parameters below here are deprecated. Please update your code as suggested
keep_warm: Optional[int] = None, # Replaced with `min_containers`
concurrency_limit: Optional[int] = None, # Replaced with `max_containers`
container_idle_timeout: Optional[int] = None, # Replaced with `scaledown_window`
allow_concurrent_inputs: Optional[int] = None, # Replaced with the `@modal.concurrent` decorator
max_inputs: Optional[int] = None, # Replaced with `single_use_containers`
_experimental_buffer_containers: Optional[int] = None, # Now stable API with `buffer_containers`
_experimental_scheduler_placement: Optional[SchedulerPlacement] = None, # Replaced in favor of
# using `region` and `nonpreemptible`
) -> _FunctionDecoratorType:
사용할 때는 데코레이션으로 표기합니다.
@app.function()
def f():
print("Hello world!")
cls( )
@typing_extensions.dataclass_transform(field_specifiers=(parameter,), kw_only_default=True)
@warn_on_renamed_autoscaler_settings
def cls(
self,
*,
image: Optional[_Image] = None, # The image to run as the container for the function
env: Optional[dict[str, Optional[str]]] = None, # Environment variables to set in the container
secrets: Optional[Collection[_Secret]] = None, # Secrets to inject into the container as environment variables
gpu: Union[
GPU_T, list[GPU_T]
] = None, # GPU request as string ("any", "T4", ...), object (`modal.GPU.A100()`, ...), or a list of either
serialized: bool = False, # Whether to send the function over using cloudpickle.
network_file_systems: dict[
Union[str, PurePosixPath], _NetworkFileSystem
] = {}, # Mountpoints for Modal NetworkFileSystems
volumes: dict[
Union[str, PurePosixPath], Union[_Volume, _CloudBucketMount]
] = {}, # Mount points for Modal Volumes & CloudBucketMounts
# Specify, in fractional CPU cores, how many CPU cores to request.
# Or, pass (request, limit) to additionally specify a hard limit in fractional CPU cores.
# CPU throttling will prevent a container from exceeding its specified limit.
cpu: Optional[Union[float, tuple[float, float]]] = None,
# Specify, in MiB, a memory request which is the minimum memory required.
# Or, pass (request, limit) to additionally specify a hard limit in MiB.
memory: Optional[Union[int, tuple[int, int]]] = None,
ephemeral_disk: Optional[int] = None, # Specify, in MiB, the ephemeral disk size for the Function.
min_containers: Optional[int] = None, # Minimum number of containers to keep warm, even when Function is idle.
max_containers: Optional[int] = None, # Limit on the number of containers that can be concurrently running.
buffer_containers: Optional[int] = None, # Number of additional idle containers to maintain under active load.
scaledown_window: Optional[int] = None, # Max time (in seconds) a container can remain idle while scaling down.
proxy: Optional[_Proxy] = None, # Reference to a Modal Proxy to use in front of this function.
retries: Optional[Union[int, Retries]] = None, # Number of times to retry each input in case of failure.
timeout: int = 300, # Maximum execution time for inputs and startup time in seconds.
startup_timeout: Optional[int] = None, # Maximum startup time in seconds with higher precedence than `timeout`.
cloud: Optional[str] = None, # Cloud provider to run the function on. Possible values are aws, gcp, oci, auto.
region: Optional[Union[str, Sequence[str]]] = None, # Region or regions to run the function on.
nonpreemptible: bool = False, # Whether to run the function on a non-preemptible instance.
enable_memory_snapshot: bool = False, # Enable memory checkpointing for faster cold starts.
block_network: bool = False, # Whether to block network access
restrict_modal_access: bool = False, # Whether to allow this class access to other Modal resources
single_use_containers: bool = False, # When True, containers will shut down after handling a single input
i6pn: Optional[bool] = None, # Whether to enable IPv6 container networking within the region.
include_source: Optional[bool] = None, # When `False`, don't automatically add the App source to the container.
experimental_options: Optional[dict[str, Any]] = None,
# Parameters below here are experimental. Use with caution!
_experimental_proxy_ip: Optional[str] = None, # IP address of proxy
_experimental_custom_scaling_factor: Optional[float] = None, # Custom scaling factor
_experimental_restrict_output: bool = False, # Don't use pickle for return values
# Parameters below here are deprecated. Please update your code as suggested
keep_warm: Optional[int] = None, # Replaced with `min_containers`
concurrency_limit: Optional[int] = None, # Replaced with `max_containers`
container_idle_timeout: Optional[int] = None, # Replaced with `scaledown_window`
allow_concurrent_inputs: Optional[int] = None, # Replaced with the `@modal.concurrent` decorator
max_inputs: Optional[int] = None, # Replaced with `single_use_containers`
_experimental_buffer_containers: Optional[int] = None, # Now stable API with `buffer_containers`
_experimental_scheduler_placement: Optional[SchedulerPlacement] = None, # Replaced in favor of
# using `region` and `nonpreemptible`
) -> Callable[[Union[CLS_T, _PartialFunction]], CLS_T]:
include( )
def include(
self,
/,
other_app: "_App",
inherit_tags: bool = True
) -> typing_extensions.Self:
사용시 다른 App에 있는 Function도 사용할 수 있게 됩니다.
app_a = modal.App("a")
@app_a.function()
def foo():
...
app_b = modal.App("b")
@app_b.function()
def bar():
...
app_a.include(app_b)
@app_a.local_entrypoint()
def main():
# use function declared on the included app
bar.remote()
set_tags( )
def set_tags(
self,
tags: Mapping[str, str],
*,
client: Optional[_Client] = None
) -> None:
get_tags( )
def get_tags(
self,
*,
client: Optional[_Client] = None
) -> dict[str, str]:
'ML' 카테고리의 다른 글
| [Modal] 03. Secret (0) | 2025.12.31 |
|---|---|
| [Modal] 02. Function (1) | 2025.12.31 |
| [Modal] App, Function, Entrypoint (0) | 2025.12.29 |
| [Modal] Modal에 대하여 (0) | 2025.12.28 |
| [GAN] Style 적대적 생성 신경망 (StyleGAN) (0) | 2023.04.28 |