Kernels
danieldk (HF Staff) committed
Commit af977a5 · 1 Parent(s): 67ac53b

Revert "Build uploaded using `kernels`."

This reverts commit 67ac53b37fc4c10a1ca2f201cd5da2d71f674f2e.
Files changed (50)
  1. build/torch28-cxx11-cu126-x86_64-linux/paged_attention/__init__.py +21 -0
  2. build/torch28-cxx11-cu126-x86_64-linux/paged_attention/_custom_ops.py +173 -0
  3. build/torch28-cxx11-cu126-x86_64-linux/paged_attention/_ops.py +9 -0
  4. build/torch28-cxx11-cu126-x86_64-linux/paged_attention/_paged_attention_287831d.abi3.so +3 -0
  5. build/torch28-cxx11-cu126-x86_64-linux/paged_attention/platforms.py +92 -0
  6. build/torch28-cxx11-cu128-x86_64-linux/paged_attention/__init__.py +21 -0
  7. build/torch28-cxx11-cu128-x86_64-linux/paged_attention/_custom_ops.py +173 -0
  8. build/torch28-cxx11-cu128-x86_64-linux/paged_attention/_ops.py +9 -0
  9. build/torch28-cxx11-cu128-x86_64-linux/paged_attention/_paged_attention_287831d.abi3.so +3 -0
  10. build/torch28-cxx11-cu128-x86_64-linux/paged_attention/platforms.py +92 -0
  11. build/torch28-cxx11-cu129-x86_64-linux/paged_attention/__init__.py +21 -0
  12. build/torch28-cxx11-cu129-x86_64-linux/paged_attention/_custom_ops.py +173 -0
  13. build/torch28-cxx11-cu129-x86_64-linux/paged_attention/_ops.py +9 -0
  14. build/torch28-cxx11-cu129-x86_64-linux/paged_attention/_paged_attention_287831d.abi3.so +3 -0
  15. build/torch28-cxx11-cu129-x86_64-linux/paged_attention/platforms.py +92 -0
  16. build/torch28-cxx11-rocm63-x86_64-linux/paged_attention/__init__.py +21 -0
  17. build/torch28-cxx11-rocm63-x86_64-linux/paged_attention/_custom_ops.py +173 -0
  18. build/torch28-cxx11-rocm63-x86_64-linux/paged_attention/_ops.py +9 -0
  19. build/torch28-cxx11-rocm63-x86_64-linux/paged_attention/_paged_attention_287831d.abi3.so +3 -0
  20. build/torch28-cxx11-rocm63-x86_64-linux/paged_attention/platforms.py +92 -0
  21. build/torch28-cxx11-rocm64-x86_64-linux/paged_attention/__init__.py +21 -0
  22. build/torch28-cxx11-rocm64-x86_64-linux/paged_attention/_custom_ops.py +173 -0
  23. build/torch28-cxx11-rocm64-x86_64-linux/paged_attention/_ops.py +9 -0
  24. build/torch28-cxx11-rocm64-x86_64-linux/paged_attention/_paged_attention_287831d.abi3.so +3 -0
  25. build/torch28-cxx11-rocm64-x86_64-linux/paged_attention/platforms.py +92 -0
  26. build/torch29-cxx11-cu126-x86_64-linux/paged_attention/__init__.py +21 -0
  27. build/torch29-cxx11-cu126-x86_64-linux/paged_attention/_custom_ops.py +173 -0
  28. build/torch29-cxx11-cu126-x86_64-linux/paged_attention/_ops.py +9 -0
  29. build/torch29-cxx11-cu126-x86_64-linux/paged_attention/_paged_attention_287831d.abi3.so +3 -0
  30. build/torch29-cxx11-cu126-x86_64-linux/paged_attention/platforms.py +92 -0
  31. build/torch29-cxx11-cu128-x86_64-linux/paged_attention/__init__.py +21 -0
  32. build/torch29-cxx11-cu128-x86_64-linux/paged_attention/_custom_ops.py +173 -0
  33. build/torch29-cxx11-cu128-x86_64-linux/paged_attention/_ops.py +9 -0
  34. build/torch29-cxx11-cu128-x86_64-linux/paged_attention/_paged_attention_287831d.abi3.so +3 -0
  35. build/torch29-cxx11-cu128-x86_64-linux/paged_attention/platforms.py +92 -0
  36. build/torch29-cxx11-cu130-x86_64-linux/paged_attention/__init__.py +21 -0
  37. build/torch29-cxx11-cu130-x86_64-linux/paged_attention/_custom_ops.py +173 -0
  38. build/torch29-cxx11-cu130-x86_64-linux/paged_attention/_ops.py +9 -0
  39. build/torch29-cxx11-cu130-x86_64-linux/paged_attention/_paged_attention_287831d.abi3.so +3 -0
  40. build/torch29-cxx11-cu130-x86_64-linux/paged_attention/platforms.py +92 -0
  41. build/torch29-cxx11-rocm63-x86_64-linux/paged_attention/__init__.py +21 -0
  42. build/torch29-cxx11-rocm63-x86_64-linux/paged_attention/_custom_ops.py +173 -0
  43. build/torch29-cxx11-rocm63-x86_64-linux/paged_attention/_ops.py +9 -0
  44. build/torch29-cxx11-rocm63-x86_64-linux/paged_attention/_paged_attention_287831d.abi3.so +3 -0
  45. build/torch29-cxx11-rocm63-x86_64-linux/paged_attention/platforms.py +92 -0
  46. build/torch29-cxx11-rocm64-x86_64-linux/paged_attention/__init__.py +21 -0
  47. build/torch29-cxx11-rocm64-x86_64-linux/paged_attention/_custom_ops.py +173 -0
  48. build/torch29-cxx11-rocm64-x86_64-linux/paged_attention/_ops.py +9 -0
  49. build/torch29-cxx11-rocm64-x86_64-linux/paged_attention/_paged_attention_287831d.abi3.so +3 -0
  50. build/torch29-cxx11-rocm64-x86_64-linux/paged_attention/platforms.py +92 -0
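Note: the reverted build directories are the artifacts consumed by the Hugging Face `kernels` loader (the commit being reverted was "Build uploaded using `kernels`."). As a minimal sketch of how such a pre-built kernel is typically loaded, with the repository id below being an assumption for illustration rather than something stated in this commit:

    # Sketch only; assumes the `kernels` package is installed and the kernel
    # is published under the (assumed) repository id below.
    from kernels import get_kernel

    paged_attention = get_kernel("kernels-community/paged-attention")  # hypothetical repo id
    # The returned module exposes the same functions as the __init__.py shown below,
    # e.g. paged_attention.reshape_and_cache(...) and paged_attention.paged_attention_v1(...).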
build/torch28-cxx11-cu126-x86_64-linux/paged_attention/__init__.py ADDED
@@ -0,0 +1,21 @@
+ from ._custom_ops import (
+     convert_fp8,
+     copy_blocks,
+     paged_attention_v1,
+     paged_attention_v2,
+     reshape_and_cache,
+     reshape_and_cache_flash,
+     swap_blocks,
+ )
+ from ._ops import ops
+
+ __all__ = [
+     "convert_fp8",
+     "copy_blocks",
+     "ops",
+     "paged_attention_v1",
+     "paged_attention_v2",
+     "reshape_and_cache",
+     "reshape_and_cache_flash",
+     "swap_blocks",
+ ]
build/torch28-cxx11-cu126-x86_64-linux/paged_attention/_custom_ops.py ADDED
@@ -0,0 +1,173 @@
+ from typing import List, Optional
+
+ import torch
+
+ from ._ops import ops
+
+
+ # page attention ops
+ def paged_attention_v1(
+     out: torch.Tensor,
+     query: torch.Tensor,
+     key_cache: torch.Tensor,
+     value_cache: torch.Tensor,
+     num_kv_heads: int,
+     scale: float,
+     block_tables: torch.Tensor,
+     seq_lens: torch.Tensor,
+     block_size: int,
+     max_seq_len: int,
+     alibi_slopes: Optional[torch.Tensor],
+     kv_cache_dtype: str,
+     k_scale: float,
+     v_scale: float,
+     tp_rank: int = 0,
+     blocksparse_local_blocks: int = 0,
+     blocksparse_vert_stride: int = 0,
+     blocksparse_block_size: int = 64,
+     blocksparse_head_sliding_step: int = 0,
+ ) -> None:
+     ops.paged_attention_v1(
+         out,
+         query,
+         key_cache,
+         value_cache,
+         num_kv_heads,
+         scale,
+         block_tables,
+         seq_lens,
+         block_size,
+         max_seq_len,
+         alibi_slopes,
+         kv_cache_dtype,
+         k_scale,
+         v_scale,
+         tp_rank,
+         blocksparse_local_blocks,
+         blocksparse_vert_stride,
+         blocksparse_block_size,
+         blocksparse_head_sliding_step,
+     )
+
+
+ def paged_attention_v2(
+     out: torch.Tensor,
+     exp_sum: torch.Tensor,
+     max_logits: torch.Tensor,
+     tmp_out: torch.Tensor,
+     query: torch.Tensor,
+     key_cache: torch.Tensor,
+     value_cache: torch.Tensor,
+     num_kv_heads: int,
+     scale: float,
+     block_tables: torch.Tensor,
+     seq_lens: torch.Tensor,
+     block_size: int,
+     max_seq_len: int,
+     alibi_slopes: Optional[torch.Tensor],
+     kv_cache_dtype: str,
+     k_scale: float,
+     v_scale: float,
+     tp_rank: int = 0,
+     blocksparse_local_blocks: int = 0,
+     blocksparse_vert_stride: int = 0,
+     blocksparse_block_size: int = 64,
+     blocksparse_head_sliding_step: int = 0,
+ ) -> None:
+     ops.paged_attention_v2(
+         out,
+         exp_sum,
+         max_logits,
+         tmp_out,
+         query,
+         key_cache,
+         value_cache,
+         num_kv_heads,
+         scale,
+         block_tables,
+         seq_lens,
+         block_size,
+         max_seq_len,
+         alibi_slopes,
+         kv_cache_dtype,
+         k_scale,
+         v_scale,
+         tp_rank,
+         blocksparse_local_blocks,
+         blocksparse_vert_stride,
+         blocksparse_block_size,
+         blocksparse_head_sliding_step,
+     )
+
+
+ def reshape_and_cache(
+     key: torch.Tensor,
+     value: torch.Tensor,
+     key_cache: torch.Tensor,
+     value_cache: torch.Tensor,
+     slot_mapping: torch.Tensor,
+     kv_cache_dtype: str,
+     k_scale: float,
+     v_scale: float,
+ ) -> None:
+     ops.reshape_and_cache(
+         key,
+         value,
+         key_cache,
+         value_cache,
+         slot_mapping,
+         kv_cache_dtype,
+         k_scale,
+         v_scale,
+     )
+
+
+ def reshape_and_cache_flash(
+     key: torch.Tensor,
+     value: torch.Tensor,
+     key_cache: torch.Tensor,
+     value_cache: torch.Tensor,
+     slot_mapping: torch.Tensor,
+     kv_cache_dtype: str,
+     k_scale: torch.Tensor,
+     v_scale: torch.Tensor,
+ ) -> None:
+     ops.reshape_and_cache_flash(
+         key,
+         value,
+         key_cache,
+         value_cache,
+         slot_mapping,
+         kv_cache_dtype,
+         k_scale,
+         v_scale,
+     )
+
+
+ def copy_blocks(
+     key_caches: List[torch.Tensor],
+     value_caches: List[torch.Tensor],
+     block_mapping: torch.Tensor,
+ ) -> None:
+     ops.copy_blocks(key_caches, value_caches, block_mapping)
+
+
+ def swap_blocks(
+     src: torch.Tensor, dst: torch.Tensor, block_mapping: torch.Tensor
+ ) -> None:
+     ops.swap_blocks(src, dst, block_mapping)
+
+
+ def convert_fp8(
+     output: torch.Tensor, input: torch.Tensor, scale: float = 1.0, kv_dtype: str = "fp8"
+ ) -> None:
+     ops.convert_fp8(output, input, scale, kv_dtype)
+
+
+ __all__ = [
+     "convert_fp8",
+     "paged_attention_v1",
+     "paged_attention_v2",
+     "reshape_and_cache",
+     "copy_blocks",
+ ]
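Note: the wrappers above are thin forwarders to the ops registered under the hash-suffixed namespace, so the keyword-friendly Python functions and the raw `ops` handle are interchangeable. A minimal sketch (assumptions: a CUDA device, the package importable as `paged_attention`, and an fp8 cache stored as uint8, which is not stated in this diff):

    import torch
    from paged_attention import convert_fp8, ops

    src = torch.randn(16, 128, dtype=torch.float16, device="cuda")
    dst = torch.empty(16, 128, dtype=torch.uint8, device="cuda")  # assumed fp8 storage dtype

    convert_fp8(dst, src, scale=1.0, kv_dtype="fp8")  # Python wrapper with keyword defaults
    ops.convert_fp8(dst, src, 1.0, "fp8")             # equivalent direct op call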
build/torch28-cxx11-cu126-x86_64-linux/paged_attention/_ops.py ADDED
@@ -0,0 +1,9 @@
+ import torch
+ from . import _paged_attention_287831d
+ ops = torch.ops._paged_attention_287831d
+
+ def add_op_namespace_prefix(op_name: str):
+     """
+     Prefix op by namespace.
+     """
+     return f"_paged_attention_287831d::{op_name}"
build/torch28-cxx11-cu126-x86_64-linux/paged_attention/_paged_attention_287831d.abi3.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d7df0c8b8817d956e843be64c94018f7a4f11059f4efd6e0e8e09171afdaa0f4
+ size 85751728
build/torch28-cxx11-cu126-x86_64-linux/paged_attention/platforms.py ADDED
@@ -0,0 +1,92 @@
+ import os
+ import random
+ from abc import ABC, abstractmethod
+ from functools import lru_cache, wraps
+ from typing import Callable, ParamSpec, TypeVar
+
+ import numpy as np
+ import torch
+
+ IS_ROCM = torch.version.hip is not None
+ IS_MPS = torch.backends.mps.is_available()
+
+
+ class Platform(ABC):
+     @classmethod
+     def seed_everything(cls, seed: int) -> None:
+         """
+         Set the seed of each random module.
+         `torch.manual_seed` will set seed on all devices.
+
+         Loosely based on: https://github.com/Lightning-AI/pytorch-lightning/blob/2.4.0/src/lightning/fabric/utilities/seed.py#L20
+         """
+         random.seed(seed)
+         np.random.seed(seed)
+         torch.manual_seed(seed)
+
+     @abstractmethod
+     def get_device_name(self, device_id: int = 0) -> str: ...
+
+     @abstractmethod
+     def is_cuda(self) -> bool: ...
+
+     @abstractmethod
+     def is_rocm(self) -> bool: ...
+
+     @abstractmethod
+     def is_mps(self) -> bool: ...
+
+
+ class CudaPlatform(Platform):
+     @classmethod
+     @lru_cache(maxsize=8)
+     def get_device_name(cls, device_id: int = 0) -> str:
+         return torch.cuda.get_device_name(0)
+
+     def is_cuda(self) -> bool:
+         return True
+
+     def is_rocm(self) -> bool:
+         return False
+
+     def is_mps(self) -> bool:
+         return False
+
+
+ class RocmPlatform(Platform):
+     @classmethod
+     @lru_cache(maxsize=8)
+     def get_device_name(cls, device_id: int = 0) -> str:
+         return torch.cuda.get_device_name(device_id)
+
+     def is_cuda(self) -> bool:
+         return False
+
+     def is_rocm(self) -> bool:
+         return True
+
+     def is_mps(self) -> bool:
+         return False
+
+
+ class MpsPlatform(Platform):
+     @classmethod
+     @lru_cache(maxsize=8)
+     def get_device_name(cls, device_id: int = 0) -> str:
+         return torch.cuda.get_device_name(device_id)
+
+     def is_cuda(self) -> bool:
+         return False
+
+     def is_rocm(self) -> bool:
+         return False
+
+     def is_mps(self) -> bool:
+         return True
+
+ current_platform = (
+     RocmPlatform() if IS_ROCM else
+     MpsPlatform() if IS_MPS else
+     CudaPlatform() if torch.cuda.is_available() else
+     None
+ )
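Note: `current_platform` gives callers a single object to branch on instead of re-probing torch. A short usage sketch, assuming the package is importable:

    from paged_attention.platforms import current_platform

    if current_platform is None:
        raise RuntimeError("no supported accelerator found")

    current_platform.seed_everything(0)
    if current_platform.is_rocm():
        print("running on", current_platform.get_device_name())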
build/torch28-cxx11-cu128-x86_64-linux/paged_attention/__init__.py ADDED
  +21 -0 (identical to build/torch28-cxx11-cu126-x86_64-linux/paged_attention/__init__.py above)
build/torch28-cxx11-cu128-x86_64-linux/paged_attention/_custom_ops.py ADDED
  +173 -0 (identical to the torch28-cxx11-cu126 copy above)
build/torch28-cxx11-cu128-x86_64-linux/paged_attention/_ops.py ADDED
  +9 -0 (identical to the torch28-cxx11-cu126 copy above)
build/torch28-cxx11-cu128-x86_64-linux/paged_attention/_paged_attention_287831d.abi3.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a60fe3114de0950f07a243dd41c6da8805d293fd649de6fc580ac585ffb19ca2
+ size 102693952
build/torch28-cxx11-cu128-x86_64-linux/paged_attention/platforms.py ADDED
  +92 -0 (identical to the torch28-cxx11-cu126 copy above)
build/torch28-cxx11-cu129-x86_64-linux/paged_attention/__init__.py ADDED
  +21 -0 (identical to build/torch28-cxx11-cu126-x86_64-linux/paged_attention/__init__.py above)
build/torch28-cxx11-cu129-x86_64-linux/paged_attention/_custom_ops.py ADDED
  +173 -0 (identical to the torch28-cxx11-cu126 copy above)
build/torch28-cxx11-cu129-x86_64-linux/paged_attention/_ops.py ADDED
  +9 -0 (identical to the torch28-cxx11-cu126 copy above)
build/torch28-cxx11-cu129-x86_64-linux/paged_attention/_paged_attention_287831d.abi3.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0bea4028251eaee2c98c67b9b20dbf575b49ab4c747bb31f6c8a06a0870ba969
+ size 111214600
build/torch28-cxx11-cu129-x86_64-linux/paged_attention/platforms.py ADDED
  +92 -0 (identical to the torch28-cxx11-cu126 copy above)
build/torch28-cxx11-rocm63-x86_64-linux/paged_attention/__init__.py ADDED
  +21 -0 (identical to build/torch28-cxx11-cu126-x86_64-linux/paged_attention/__init__.py above)
build/torch28-cxx11-rocm63-x86_64-linux/paged_attention/_custom_ops.py ADDED
  +173 -0 (identical to the torch28-cxx11-cu126 copy above)
build/torch28-cxx11-rocm63-x86_64-linux/paged_attention/_ops.py ADDED
  +9 -0 (identical to the torch28-cxx11-cu126 copy above)
build/torch28-cxx11-rocm63-x86_64-linux/paged_attention/_paged_attention_287831d.abi3.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:52c29c39d300ade26ca567fe96db040df2bed151dd35adf24c74a41469fee54b
+ size 120179024
build/torch28-cxx11-rocm63-x86_64-linux/paged_attention/platforms.py ADDED
  +92 -0 (identical to the torch28-cxx11-cu126 copy above)
build/torch28-cxx11-rocm64-x86_64-linux/paged_attention/__init__.py ADDED
  +21 -0 (identical to build/torch28-cxx11-cu126-x86_64-linux/paged_attention/__init__.py above)
build/torch28-cxx11-rocm64-x86_64-linux/paged_attention/_custom_ops.py ADDED
  +173 -0 (identical to the torch28-cxx11-cu126 copy above)
build/torch28-cxx11-rocm64-x86_64-linux/paged_attention/_ops.py ADDED
  +9 -0 (identical to the torch28-cxx11-cu126 copy above)
build/torch28-cxx11-rocm64-x86_64-linux/paged_attention/_paged_attention_287831d.abi3.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3ef6061f268d5e65216889615ed121e438d29c93ddaffef959b3c66d5219bb55
+ size 121012632
build/torch28-cxx11-rocm64-x86_64-linux/paged_attention/platforms.py ADDED
  +92 -0 (identical to the torch28-cxx11-cu126 copy above)
build/torch29-cxx11-cu126-x86_64-linux/paged_attention/__init__.py ADDED
  +21 -0 (identical to build/torch28-cxx11-cu126-x86_64-linux/paged_attention/__init__.py above)
build/torch29-cxx11-cu126-x86_64-linux/paged_attention/_custom_ops.py ADDED
  +173 -0 (identical to the torch28-cxx11-cu126 copy above)
build/torch29-cxx11-cu126-x86_64-linux/paged_attention/_ops.py ADDED
  +9 -0 (identical to the torch28-cxx11-cu126 copy above)
build/torch29-cxx11-cu126-x86_64-linux/paged_attention/_paged_attention_287831d.abi3.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cf84af5418afac998537d6c01c88ad3bd083fa1912a1c90ae70809196cb0cc45
+ size 85751704
build/torch29-cxx11-cu126-x86_64-linux/paged_attention/platforms.py ADDED
  +92 -0 (identical to the torch28-cxx11-cu126 copy above)
build/torch29-cxx11-cu128-x86_64-linux/paged_attention/__init__.py ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from ._custom_ops import (
2
+ convert_fp8,
3
+ copy_blocks,
4
+ paged_attention_v1,
5
+ paged_attention_v2,
6
+ reshape_and_cache,
7
+ reshape_and_cache_flash,
8
+ swap_blocks,
9
+ )
10
+ from ._ops import ops
11
+
12
+ __all__ = [
13
+ "convert_fp8",
14
+ "copy_blocks",
15
+ "ops",
16
+ "paged_attention_v1",
17
+ "paged_attention_v2",
18
+ "reshape_and_cache",
19
+ "reshape_and_cache_flash",
20
+ "swap_blocks",
21
+ ]
build/torch29-cxx11-cu128-x86_64-linux/paged_attention/_custom_ops.py ADDED
@@ -0,0 +1,173 @@
+from typing import List, Optional
+
+import torch
+
+from ._ops import ops
+
+
+# page attention ops
+def paged_attention_v1(
+    out: torch.Tensor,
+    query: torch.Tensor,
+    key_cache: torch.Tensor,
+    value_cache: torch.Tensor,
+    num_kv_heads: int,
+    scale: float,
+    block_tables: torch.Tensor,
+    seq_lens: torch.Tensor,
+    block_size: int,
+    max_seq_len: int,
+    alibi_slopes: Optional[torch.Tensor],
+    kv_cache_dtype: str,
+    k_scale: float,
+    v_scale: float,
+    tp_rank: int = 0,
+    blocksparse_local_blocks: int = 0,
+    blocksparse_vert_stride: int = 0,
+    blocksparse_block_size: int = 64,
+    blocksparse_head_sliding_step: int = 0,
+) -> None:
+    ops.paged_attention_v1(
+        out,
+        query,
+        key_cache,
+        value_cache,
+        num_kv_heads,
+        scale,
+        block_tables,
+        seq_lens,
+        block_size,
+        max_seq_len,
+        alibi_slopes,
+        kv_cache_dtype,
+        k_scale,
+        v_scale,
+        tp_rank,
+        blocksparse_local_blocks,
+        blocksparse_vert_stride,
+        blocksparse_block_size,
+        blocksparse_head_sliding_step,
+    )
+
+
+def paged_attention_v2(
+    out: torch.Tensor,
+    exp_sum: torch.Tensor,
+    max_logits: torch.Tensor,
+    tmp_out: torch.Tensor,
+    query: torch.Tensor,
+    key_cache: torch.Tensor,
+    value_cache: torch.Tensor,
+    num_kv_heads: int,
+    scale: float,
+    block_tables: torch.Tensor,
+    seq_lens: torch.Tensor,
+    block_size: int,
+    max_seq_len: int,
+    alibi_slopes: Optional[torch.Tensor],
+    kv_cache_dtype: str,
+    k_scale: float,
+    v_scale: float,
+    tp_rank: int = 0,
+    blocksparse_local_blocks: int = 0,
+    blocksparse_vert_stride: int = 0,
+    blocksparse_block_size: int = 64,
+    blocksparse_head_sliding_step: int = 0,
+) -> None:
+    ops.paged_attention_v2(
+        out,
+        exp_sum,
+        max_logits,
+        tmp_out,
+        query,
+        key_cache,
+        value_cache,
+        num_kv_heads,
+        scale,
+        block_tables,
+        seq_lens,
+        block_size,
+        max_seq_len,
+        alibi_slopes,
+        kv_cache_dtype,
+        k_scale,
+        v_scale,
+        tp_rank,
+        blocksparse_local_blocks,
+        blocksparse_vert_stride,
+        blocksparse_block_size,
+        blocksparse_head_sliding_step,
+    )
+
+
+def reshape_and_cache(
+    key: torch.Tensor,
+    value: torch.Tensor,
+    key_cache: torch.Tensor,
+    value_cache: torch.Tensor,
+    slot_mapping: torch.Tensor,
+    kv_cache_dtype: str,
+    k_scale: float,
+    v_scale: float,
+) -> None:
+    ops.reshape_and_cache(
+        key,
+        value,
+        key_cache,
+        value_cache,
+        slot_mapping,
+        kv_cache_dtype,
+        k_scale,
+        v_scale,
+    )
+
+
+def reshape_and_cache_flash(
+    key: torch.Tensor,
+    value: torch.Tensor,
+    key_cache: torch.Tensor,
+    value_cache: torch.Tensor,
+    slot_mapping: torch.Tensor,
+    kv_cache_dtype: str,
+    k_scale: torch.Tensor,
+    v_scale: torch.Tensor,
+) -> None:
+    ops.reshape_and_cache_flash(
+        key,
+        value,
+        key_cache,
+        value_cache,
+        slot_mapping,
+        kv_cache_dtype,
+        k_scale,
+        v_scale,
+    )
+
+
+def copy_blocks(
+    key_caches: List[torch.Tensor],
+    value_caches: List[torch.Tensor],
+    block_mapping: torch.Tensor,
+) -> None:
+    ops.copy_blocks(key_caches, value_caches, block_mapping)
+
+
+def swap_blocks(
+    src: torch.Tensor, dst: torch.Tensor, block_mapping: torch.Tensor
+) -> None:
+    ops.swap_blocks(src, dst, block_mapping)
+
+
+def convert_fp8(
+    output: torch.Tensor, input: torch.Tensor, scale: float = 1.0, kv_dtype: str = "fp8"
+) -> None:
+    ops.convert_fp8(output, input, scale, kv_dtype)
+
+
+__all__ = [
+    "convert_fp8",
+    "paged_attention_v1",
+    "paged_attention_v2",
+    "reshape_and_cache",
+    "copy_blocks",
+]
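The two attention entry points above differ only in the extra `exp_sum`, `max_logits`, and `tmp_out` arguments of `paged_attention_v2`: scratch buffers for a partitioned (split-KV) reduction over long sequences that the kernel folds back into `out`. A rough allocation sketch, assuming the vLLM-style layout these wrappers appear to mirror; the partition size and buffer shapes are assumptions, not read from this commit:

import torch

num_seqs, num_heads, head_size = 4, 32, 128
max_seq_len, partition_size = 8192, 512  # assumed partition size
max_num_partitions = (max_seq_len + partition_size - 1) // partition_size

# Assumed shapes of the v2 scratch buffers (allocate them on the attention device).
tmp_out = torch.empty(num_seqs, num_heads, max_num_partitions, head_size, dtype=torch.float16)
exp_sum = torch.empty(num_seqs, num_heads, max_num_partitions, dtype=torch.float32)
max_logits = torch.empty_like(exp_sum)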
build/torch29-cxx11-cu128-x86_64-linux/paged_attention/_ops.py ADDED
@@ -0,0 +1,9 @@
+import torch
+from . import _paged_attention_287831d
+ops = torch.ops._paged_attention_287831d
+
+def add_op_namespace_prefix(op_name: str):
+    """
+    Prefix op by namespace.
+    """
+    return f"_paged_attention_287831d::{op_name}"
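`_ops.py` is what ties the Python wrappers to the compiled extension: importing `_paged_attention_287831d` registers the custom ops under a build-hash namespace in `torch.ops`, and `add_op_namespace_prefix` builds the fully qualified op name (useful, for example, when registering meta/fake implementations). A small sketch, assuming the wheel for your platform is importable as `paged_attention`:

from paged_attention._ops import ops, add_op_namespace_prefix

print(add_op_namespace_prefix("paged_attention_v1"))
# -> _paged_attention_287831d::paged_attention_v1
print(ops.paged_attention_v1)  # the op overload packet registered by the shared object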
build/torch29-cxx11-cu128-x86_64-linux/paged_attention/_paged_attention_287831d.abi3.so ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:39a0b3ae487a77fd16fae849a19f4c542a7ca8b8d423a64e32692e77b036eee6
+size 102693928
build/torch29-cxx11-cu128-x86_64-linux/paged_attention/platforms.py ADDED
@@ -0,0 +1,92 @@
+import os
+import random
+from abc import ABC, abstractmethod
+from functools import lru_cache, wraps
+from typing import Callable, ParamSpec, TypeVar
+
+import numpy as np
+import torch
+
+IS_ROCM = torch.version.hip is not None
+IS_MPS = torch.backends.mps.is_available()
+
+
+class Platform(ABC):
+    @classmethod
+    def seed_everything(cls, seed: int) -> None:
+        """
+        Set the seed of each random module.
+        `torch.manual_seed` will set seed on all devices.
+
+        Loosely based on: https://github.com/Lightning-AI/pytorch-lightning/blob/2.4.0/src/lightning/fabric/utilities/seed.py#L20
+        """
+        random.seed(seed)
+        np.random.seed(seed)
+        torch.manual_seed(seed)
+
+    @abstractmethod
+    def get_device_name(self, device_id: int = 0) -> str: ...
+
+    @abstractmethod
+    def is_cuda(self) -> bool: ...
+
+    @abstractmethod
+    def is_rocm(self) -> bool: ...
+
+    @abstractmethod
+    def is_mps(self) -> bool: ...
+
+
+class CudaPlatform(Platform):
+    @classmethod
+    @lru_cache(maxsize=8)
+    def get_device_name(cls, device_id: int = 0) -> str:
+        return torch.cuda.get_device_name(0)
+
+    def is_cuda(self) -> bool:
+        return True
+
+    def is_rocm(self) -> bool:
+        return False
+
+    def is_mps(self) -> bool:
+        return False
+
+
+class RocmPlatform(Platform):
+    @classmethod
+    @lru_cache(maxsize=8)
+    def get_device_name(cls, device_id: int = 0) -> str:
+        return torch.cuda.get_device_name(device_id)
+
+    def is_cuda(self) -> bool:
+        return False
+
+    def is_rocm(self) -> bool:
+        return True
+
+    def is_mps(self) -> bool:
+        return False
+
+
+class MpsPlatform(Platform):
+    @classmethod
+    @lru_cache(maxsize=8)
+    def get_device_name(cls, device_id: int = 0) -> str:
+        return torch.cuda.get_device_name(device_id)
+
+    def is_cuda(self) -> bool:
+        return False
+
+    def is_rocm(self) -> bool:
+        return False
+
+    def is_mps(self) -> bool:
+        return True
+
+current_platform = (
+    RocmPlatform() if IS_ROCM else
+    MpsPlatform() if IS_MPS else
+    CudaPlatform() if torch.cuda.is_available() else
+    None
+)
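`platforms.py` detects the accelerator once at import time and exposes it as the `current_platform` singleton (or `None` when no CUDA, ROCm, or MPS device is present). A short usage sketch, assuming the package is importable as `paged_attention`:

from paged_attention.platforms import current_platform

if current_platform is None:
    raise RuntimeError("no CUDA, ROCm, or MPS device detected")

current_platform.seed_everything(1234)  # seeds random, numpy and torch
if current_platform.is_cuda() or current_platform.is_rocm():
    print(current_platform.get_device_name(0))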
build/torch29-cxx11-cu130-x86_64-linux/paged_attention/__init__.py ADDED
@@ -0,0 +1,21 @@
+from ._custom_ops import (
+    convert_fp8,
+    copy_blocks,
+    paged_attention_v1,
+    paged_attention_v2,
+    reshape_and_cache,
+    reshape_and_cache_flash,
+    swap_blocks,
+)
+from ._ops import ops
+
+__all__ = [
+    "convert_fp8",
+    "copy_blocks",
+    "ops",
+    "paged_attention_v1",
+    "paged_attention_v2",
+    "reshape_and_cache",
+    "reshape_and_cache_flash",
+    "swap_blocks",
+]
build/torch29-cxx11-cu130-x86_64-linux/paged_attention/_custom_ops.py ADDED
@@ -0,0 +1,173 @@
+from typing import List, Optional
+
+import torch
+
+from ._ops import ops
+
+
+# page attention ops
+def paged_attention_v1(
+    out: torch.Tensor,
+    query: torch.Tensor,
+    key_cache: torch.Tensor,
+    value_cache: torch.Tensor,
+    num_kv_heads: int,
+    scale: float,
+    block_tables: torch.Tensor,
+    seq_lens: torch.Tensor,
+    block_size: int,
+    max_seq_len: int,
+    alibi_slopes: Optional[torch.Tensor],
+    kv_cache_dtype: str,
+    k_scale: float,
+    v_scale: float,
+    tp_rank: int = 0,
+    blocksparse_local_blocks: int = 0,
+    blocksparse_vert_stride: int = 0,
+    blocksparse_block_size: int = 64,
+    blocksparse_head_sliding_step: int = 0,
+) -> None:
+    ops.paged_attention_v1(
+        out,
+        query,
+        key_cache,
+        value_cache,
+        num_kv_heads,
+        scale,
+        block_tables,
+        seq_lens,
+        block_size,
+        max_seq_len,
+        alibi_slopes,
+        kv_cache_dtype,
+        k_scale,
+        v_scale,
+        tp_rank,
+        blocksparse_local_blocks,
+        blocksparse_vert_stride,
+        blocksparse_block_size,
+        blocksparse_head_sliding_step,
+    )
+
+
+def paged_attention_v2(
+    out: torch.Tensor,
+    exp_sum: torch.Tensor,
+    max_logits: torch.Tensor,
+    tmp_out: torch.Tensor,
+    query: torch.Tensor,
+    key_cache: torch.Tensor,
+    value_cache: torch.Tensor,
+    num_kv_heads: int,
+    scale: float,
+    block_tables: torch.Tensor,
+    seq_lens: torch.Tensor,
+    block_size: int,
+    max_seq_len: int,
+    alibi_slopes: Optional[torch.Tensor],
+    kv_cache_dtype: str,
+    k_scale: float,
+    v_scale: float,
+    tp_rank: int = 0,
+    blocksparse_local_blocks: int = 0,
+    blocksparse_vert_stride: int = 0,
+    blocksparse_block_size: int = 64,
+    blocksparse_head_sliding_step: int = 0,
+) -> None:
+    ops.paged_attention_v2(
+        out,
+        exp_sum,
+        max_logits,
+        tmp_out,
+        query,
+        key_cache,
+        value_cache,
+        num_kv_heads,
+        scale,
+        block_tables,
+        seq_lens,
+        block_size,
+        max_seq_len,
+        alibi_slopes,
+        kv_cache_dtype,
+        k_scale,
+        v_scale,
+        tp_rank,
+        blocksparse_local_blocks,
+        blocksparse_vert_stride,
+        blocksparse_block_size,
+        blocksparse_head_sliding_step,
+    )
+
+
+def reshape_and_cache(
+    key: torch.Tensor,
+    value: torch.Tensor,
+    key_cache: torch.Tensor,
+    value_cache: torch.Tensor,
+    slot_mapping: torch.Tensor,
+    kv_cache_dtype: str,
+    k_scale: float,
+    v_scale: float,
+) -> None:
+    ops.reshape_and_cache(
+        key,
+        value,
+        key_cache,
+        value_cache,
+        slot_mapping,
+        kv_cache_dtype,
+        k_scale,
+        v_scale,
+    )
+
+
+def reshape_and_cache_flash(
+    key: torch.Tensor,
+    value: torch.Tensor,
+    key_cache: torch.Tensor,
+    value_cache: torch.Tensor,
+    slot_mapping: torch.Tensor,
+    kv_cache_dtype: str,
+    k_scale: torch.Tensor,
+    v_scale: torch.Tensor,
+) -> None:
+    ops.reshape_and_cache_flash(
+        key,
+        value,
+        key_cache,
+        value_cache,
+        slot_mapping,
+        kv_cache_dtype,
+        k_scale,
+        v_scale,
+    )
+
+
+def copy_blocks(
+    key_caches: List[torch.Tensor],
+    value_caches: List[torch.Tensor],
+    block_mapping: torch.Tensor,
+) -> None:
+    ops.copy_blocks(key_caches, value_caches, block_mapping)
+
+
+def swap_blocks(
+    src: torch.Tensor, dst: torch.Tensor, block_mapping: torch.Tensor
+) -> None:
+    ops.swap_blocks(src, dst, block_mapping)
+
+
+def convert_fp8(
+    output: torch.Tensor, input: torch.Tensor, scale: float = 1.0, kv_dtype: str = "fp8"
+) -> None:
+    ops.convert_fp8(output, input, scale, kv_dtype)
+
+
+__all__ = [
+    "convert_fp8",
+    "paged_attention_v1",
+    "paged_attention_v2",
+    "reshape_and_cache",
+    "copy_blocks",
+]
build/torch29-cxx11-cu130-x86_64-linux/paged_attention/_ops.py ADDED
@@ -0,0 +1,9 @@
+import torch
+from . import _paged_attention_287831d
+ops = torch.ops._paged_attention_287831d
+
+def add_op_namespace_prefix(op_name: str):
+    """
+    Prefix op by namespace.
+    """
+    return f"_paged_attention_287831d::{op_name}"
build/torch29-cxx11-cu130-x86_64-linux/paged_attention/_paged_attention_287831d.abi3.so ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:231953b324c6cfd0847c9322d311e7096af3909ced06344b6a5485baed936767
+size 63028488
build/torch29-cxx11-cu130-x86_64-linux/paged_attention/platforms.py ADDED
@@ -0,0 +1,92 @@
+import os
+import random
+from abc import ABC, abstractmethod
+from functools import lru_cache, wraps
+from typing import Callable, ParamSpec, TypeVar
+
+import numpy as np
+import torch
+
+IS_ROCM = torch.version.hip is not None
+IS_MPS = torch.backends.mps.is_available()
+
+
+class Platform(ABC):
+    @classmethod
+    def seed_everything(cls, seed: int) -> None:
+        """
+        Set the seed of each random module.
+        `torch.manual_seed` will set seed on all devices.
+
+        Loosely based on: https://github.com/Lightning-AI/pytorch-lightning/blob/2.4.0/src/lightning/fabric/utilities/seed.py#L20
+        """
+        random.seed(seed)
+        np.random.seed(seed)
+        torch.manual_seed(seed)
+
+    @abstractmethod
+    def get_device_name(self, device_id: int = 0) -> str: ...
+
+    @abstractmethod
+    def is_cuda(self) -> bool: ...
+
+    @abstractmethod
+    def is_rocm(self) -> bool: ...
+
+    @abstractmethod
+    def is_mps(self) -> bool: ...
+
+
+class CudaPlatform(Platform):
+    @classmethod
+    @lru_cache(maxsize=8)
+    def get_device_name(cls, device_id: int = 0) -> str:
+        return torch.cuda.get_device_name(0)
+
+    def is_cuda(self) -> bool:
+        return True
+
+    def is_rocm(self) -> bool:
+        return False
+
+    def is_mps(self) -> bool:
+        return False
+
+
+class RocmPlatform(Platform):
+    @classmethod
+    @lru_cache(maxsize=8)
+    def get_device_name(cls, device_id: int = 0) -> str:
+        return torch.cuda.get_device_name(device_id)
+
+    def is_cuda(self) -> bool:
+        return False
+
+    def is_rocm(self) -> bool:
+        return True
+
+    def is_mps(self) -> bool:
+        return False
+
+
+class MpsPlatform(Platform):
+    @classmethod
+    @lru_cache(maxsize=8)
+    def get_device_name(cls, device_id: int = 0) -> str:
+        return torch.cuda.get_device_name(device_id)
+
+    def is_cuda(self) -> bool:
+        return False
+
+    def is_rocm(self) -> bool:
+        return False
+
+    def is_mps(self) -> bool:
+        return True
+
+current_platform = (
+    RocmPlatform() if IS_ROCM else
+    MpsPlatform() if IS_MPS else
+    CudaPlatform() if torch.cuda.is_available() else
+    None
+)
build/torch29-cxx11-rocm63-x86_64-linux/paged_attention/__init__.py ADDED
@@ -0,0 +1,21 @@
+from ._custom_ops import (
+    convert_fp8,
+    copy_blocks,
+    paged_attention_v1,
+    paged_attention_v2,
+    reshape_and_cache,
+    reshape_and_cache_flash,
+    swap_blocks,
+)
+from ._ops import ops
+
+__all__ = [
+    "convert_fp8",
+    "copy_blocks",
+    "ops",
+    "paged_attention_v1",
+    "paged_attention_v2",
+    "reshape_and_cache",
+    "reshape_and_cache_flash",
+    "swap_blocks",
+]
build/torch29-cxx11-rocm63-x86_64-linux/paged_attention/_custom_ops.py ADDED
@@ -0,0 +1,173 @@
+from typing import List, Optional
+
+import torch
+
+from ._ops import ops
+
+
+# page attention ops
+def paged_attention_v1(
+    out: torch.Tensor,
+    query: torch.Tensor,
+    key_cache: torch.Tensor,
+    value_cache: torch.Tensor,
+    num_kv_heads: int,
+    scale: float,
+    block_tables: torch.Tensor,
+    seq_lens: torch.Tensor,
+    block_size: int,
+    max_seq_len: int,
+    alibi_slopes: Optional[torch.Tensor],
+    kv_cache_dtype: str,
+    k_scale: float,
+    v_scale: float,
+    tp_rank: int = 0,
+    blocksparse_local_blocks: int = 0,
+    blocksparse_vert_stride: int = 0,
+    blocksparse_block_size: int = 64,
+    blocksparse_head_sliding_step: int = 0,
+) -> None:
+    ops.paged_attention_v1(
+        out,
+        query,
+        key_cache,
+        value_cache,
+        num_kv_heads,
+        scale,
+        block_tables,
+        seq_lens,
+        block_size,
+        max_seq_len,
+        alibi_slopes,
+        kv_cache_dtype,
+        k_scale,
+        v_scale,
+        tp_rank,
+        blocksparse_local_blocks,
+        blocksparse_vert_stride,
+        blocksparse_block_size,
+        blocksparse_head_sliding_step,
+    )
+
+
+def paged_attention_v2(
+    out: torch.Tensor,
+    exp_sum: torch.Tensor,
+    max_logits: torch.Tensor,
+    tmp_out: torch.Tensor,
+    query: torch.Tensor,
+    key_cache: torch.Tensor,
+    value_cache: torch.Tensor,
+    num_kv_heads: int,
+    scale: float,
+    block_tables: torch.Tensor,
+    seq_lens: torch.Tensor,
+    block_size: int,
+    max_seq_len: int,
+    alibi_slopes: Optional[torch.Tensor],
+    kv_cache_dtype: str,
+    k_scale: float,
+    v_scale: float,
+    tp_rank: int = 0,
+    blocksparse_local_blocks: int = 0,
+    blocksparse_vert_stride: int = 0,
+    blocksparse_block_size: int = 64,
+    blocksparse_head_sliding_step: int = 0,
+) -> None:
+    ops.paged_attention_v2(
+        out,
+        exp_sum,
+        max_logits,
+        tmp_out,
+        query,
+        key_cache,
+        value_cache,
+        num_kv_heads,
+        scale,
+        block_tables,
+        seq_lens,
+        block_size,
+        max_seq_len,
+        alibi_slopes,
+        kv_cache_dtype,
+        k_scale,
+        v_scale,
+        tp_rank,
+        blocksparse_local_blocks,
+        blocksparse_vert_stride,
+        blocksparse_block_size,
+        blocksparse_head_sliding_step,
+    )
+
+
+def reshape_and_cache(
+    key: torch.Tensor,
+    value: torch.Tensor,
+    key_cache: torch.Tensor,
+    value_cache: torch.Tensor,
+    slot_mapping: torch.Tensor,
+    kv_cache_dtype: str,
+    k_scale: float,
+    v_scale: float,
+) -> None:
+    ops.reshape_and_cache(
+        key,
+        value,
+        key_cache,
+        value_cache,
+        slot_mapping,
+        kv_cache_dtype,
+        k_scale,
+        v_scale,
+    )
+
+
+def reshape_and_cache_flash(
+    key: torch.Tensor,
+    value: torch.Tensor,
+    key_cache: torch.Tensor,
+    value_cache: torch.Tensor,
+    slot_mapping: torch.Tensor,
+    kv_cache_dtype: str,
+    k_scale: torch.Tensor,
+    v_scale: torch.Tensor,
+) -> None:
+    ops.reshape_and_cache_flash(
+        key,
+        value,
+        key_cache,
+        value_cache,
+        slot_mapping,
+        kv_cache_dtype,
+        k_scale,
+        v_scale,
+    )
+
+
+def copy_blocks(
+    key_caches: List[torch.Tensor],
+    value_caches: List[torch.Tensor],
+    block_mapping: torch.Tensor,
+) -> None:
+    ops.copy_blocks(key_caches, value_caches, block_mapping)
+
+
+def swap_blocks(
+    src: torch.Tensor, dst: torch.Tensor, block_mapping: torch.Tensor
+) -> None:
+    ops.swap_blocks(src, dst, block_mapping)
+
+
+def convert_fp8(
+    output: torch.Tensor, input: torch.Tensor, scale: float = 1.0, kv_dtype: str = "fp8"
+) -> None:
+    ops.convert_fp8(output, input, scale, kv_dtype)
+
+
+__all__ = [
+    "convert_fp8",
+    "paged_attention_v1",
+    "paged_attention_v2",
+    "reshape_and_cache",
+    "copy_blocks",
+]
build/torch29-cxx11-rocm63-x86_64-linux/paged_attention/_ops.py ADDED
@@ -0,0 +1,9 @@
+import torch
+from . import _paged_attention_287831d
+ops = torch.ops._paged_attention_287831d
+
+def add_op_namespace_prefix(op_name: str):
+    """
+    Prefix op by namespace.
+    """
+    return f"_paged_attention_287831d::{op_name}"
build/torch29-cxx11-rocm63-x86_64-linux/paged_attention/_paged_attention_287831d.abi3.so ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:406d35b2ad310b4d62739fb18b48931e7631d608ab10be496e65784860d12bae
+size 120179000
build/torch29-cxx11-rocm63-x86_64-linux/paged_attention/platforms.py ADDED
@@ -0,0 +1,92 @@
+import os
+import random
+from abc import ABC, abstractmethod
+from functools import lru_cache, wraps
+from typing import Callable, ParamSpec, TypeVar
+
+import numpy as np
+import torch
+
+IS_ROCM = torch.version.hip is not None
+IS_MPS = torch.backends.mps.is_available()
+
+
+class Platform(ABC):
+    @classmethod
+    def seed_everything(cls, seed: int) -> None:
+        """
+        Set the seed of each random module.
+        `torch.manual_seed` will set seed on all devices.
+
+        Loosely based on: https://github.com/Lightning-AI/pytorch-lightning/blob/2.4.0/src/lightning/fabric/utilities/seed.py#L20
+        """
+        random.seed(seed)
+        np.random.seed(seed)
+        torch.manual_seed(seed)
+
+    @abstractmethod
+    def get_device_name(self, device_id: int = 0) -> str: ...
+
+    @abstractmethod
+    def is_cuda(self) -> bool: ...
+
+    @abstractmethod
+    def is_rocm(self) -> bool: ...
+
+    @abstractmethod
+    def is_mps(self) -> bool: ...
+
+
+class CudaPlatform(Platform):
+    @classmethod
+    @lru_cache(maxsize=8)
+    def get_device_name(cls, device_id: int = 0) -> str:
+        return torch.cuda.get_device_name(0)
+
+    def is_cuda(self) -> bool:
+        return True
+
+    def is_rocm(self) -> bool:
+        return False
+
+    def is_mps(self) -> bool:
+        return False
+
+
+class RocmPlatform(Platform):
+    @classmethod
+    @lru_cache(maxsize=8)
+    def get_device_name(cls, device_id: int = 0) -> str:
+        return torch.cuda.get_device_name(device_id)
+
+    def is_cuda(self) -> bool:
+        return False
+
+    def is_rocm(self) -> bool:
+        return True
+
+    def is_mps(self) -> bool:
+        return False
+
+
+class MpsPlatform(Platform):
+    @classmethod
+    @lru_cache(maxsize=8)
+    def get_device_name(cls, device_id: int = 0) -> str:
+        return torch.cuda.get_device_name(device_id)
+
+    def is_cuda(self) -> bool:
+        return False
+
+    def is_rocm(self) -> bool:
+        return False
+
+    def is_mps(self) -> bool:
+        return True
+
+current_platform = (
+    RocmPlatform() if IS_ROCM else
+    MpsPlatform() if IS_MPS else
+    CudaPlatform() if torch.cuda.is_available() else
+    None
+)
build/torch29-cxx11-rocm64-x86_64-linux/paged_attention/__init__.py ADDED
@@ -0,0 +1,21 @@
+from ._custom_ops import (
+    convert_fp8,
+    copy_blocks,
+    paged_attention_v1,
+    paged_attention_v2,
+    reshape_and_cache,
+    reshape_and_cache_flash,
+    swap_blocks,
+)
+from ._ops import ops
+
+__all__ = [
+    "convert_fp8",
+    "copy_blocks",
+    "ops",
+    "paged_attention_v1",
+    "paged_attention_v2",
+    "reshape_and_cache",
+    "reshape_and_cache_flash",
+    "swap_blocks",
+]
build/torch29-cxx11-rocm64-x86_64-linux/paged_attention/_custom_ops.py ADDED
@@ -0,0 +1,173 @@
+from typing import List, Optional
+
+import torch
+
+from ._ops import ops
+
+
+# page attention ops
+def paged_attention_v1(
+    out: torch.Tensor,
+    query: torch.Tensor,
+    key_cache: torch.Tensor,
+    value_cache: torch.Tensor,
+    num_kv_heads: int,
+    scale: float,
+    block_tables: torch.Tensor,
+    seq_lens: torch.Tensor,
+    block_size: int,
+    max_seq_len: int,
+    alibi_slopes: Optional[torch.Tensor],
+    kv_cache_dtype: str,
+    k_scale: float,
+    v_scale: float,
+    tp_rank: int = 0,
+    blocksparse_local_blocks: int = 0,
+    blocksparse_vert_stride: int = 0,
+    blocksparse_block_size: int = 64,
+    blocksparse_head_sliding_step: int = 0,
+) -> None:
+    ops.paged_attention_v1(
+        out,
+        query,
+        key_cache,
+        value_cache,
+        num_kv_heads,
+        scale,
+        block_tables,
+        seq_lens,
+        block_size,
+        max_seq_len,
+        alibi_slopes,
+        kv_cache_dtype,
+        k_scale,
+        v_scale,
+        tp_rank,
+        blocksparse_local_blocks,
+        blocksparse_vert_stride,
+        blocksparse_block_size,
+        blocksparse_head_sliding_step,
+    )
+
+
+def paged_attention_v2(
+    out: torch.Tensor,
+    exp_sum: torch.Tensor,
+    max_logits: torch.Tensor,
+    tmp_out: torch.Tensor,
+    query: torch.Tensor,
+    key_cache: torch.Tensor,
+    value_cache: torch.Tensor,
+    num_kv_heads: int,
+    scale: float,
+    block_tables: torch.Tensor,
+    seq_lens: torch.Tensor,
+    block_size: int,
+    max_seq_len: int,
+    alibi_slopes: Optional[torch.Tensor],
+    kv_cache_dtype: str,
+    k_scale: float,
+    v_scale: float,
+    tp_rank: int = 0,
+    blocksparse_local_blocks: int = 0,
+    blocksparse_vert_stride: int = 0,
+    blocksparse_block_size: int = 64,
+    blocksparse_head_sliding_step: int = 0,
+) -> None:
+    ops.paged_attention_v2(
+        out,
+        exp_sum,
+        max_logits,
+        tmp_out,
+        query,
+        key_cache,
+        value_cache,
+        num_kv_heads,
+        scale,
+        block_tables,
+        seq_lens,
+        block_size,
+        max_seq_len,
+        alibi_slopes,
+        kv_cache_dtype,
+        k_scale,
+        v_scale,
+        tp_rank,
+        blocksparse_local_blocks,
+        blocksparse_vert_stride,
+        blocksparse_block_size,
+        blocksparse_head_sliding_step,
+    )
+
+
+def reshape_and_cache(
+    key: torch.Tensor,
+    value: torch.Tensor,
+    key_cache: torch.Tensor,
+    value_cache: torch.Tensor,
+    slot_mapping: torch.Tensor,
+    kv_cache_dtype: str,
+    k_scale: float,
+    v_scale: float,
+) -> None:
+    ops.reshape_and_cache(
+        key,
+        value,
+        key_cache,
+        value_cache,
+        slot_mapping,
+        kv_cache_dtype,
+        k_scale,
+        v_scale,
+    )
+
+
+def reshape_and_cache_flash(
+    key: torch.Tensor,
+    value: torch.Tensor,
+    key_cache: torch.Tensor,
+    value_cache: torch.Tensor,
+    slot_mapping: torch.Tensor,
+    kv_cache_dtype: str,
+    k_scale: torch.Tensor,
+    v_scale: torch.Tensor,
+) -> None:
+    ops.reshape_and_cache_flash(
+        key,
+        value,
+        key_cache,
+        value_cache,
+        slot_mapping,
+        kv_cache_dtype,
+        k_scale,
+        v_scale,
+    )
+
+
+def copy_blocks(
+    key_caches: List[torch.Tensor],
+    value_caches: List[torch.Tensor],
+    block_mapping: torch.Tensor,
+) -> None:
+    ops.copy_blocks(key_caches, value_caches, block_mapping)
+
+
+def swap_blocks(
+    src: torch.Tensor, dst: torch.Tensor, block_mapping: torch.Tensor
+) -> None:
+    ops.swap_blocks(src, dst, block_mapping)
+
+
+def convert_fp8(
+    output: torch.Tensor, input: torch.Tensor, scale: float = 1.0, kv_dtype: str = "fp8"
+) -> None:
+    ops.convert_fp8(output, input, scale, kv_dtype)
+
+
+__all__ = [
+    "convert_fp8",
+    "paged_attention_v1",
+    "paged_attention_v2",
+    "reshape_and_cache",
+    "copy_blocks",
+]
build/torch29-cxx11-rocm64-x86_64-linux/paged_attention/_ops.py ADDED
@@ -0,0 +1,9 @@
+import torch
+from . import _paged_attention_287831d
+ops = torch.ops._paged_attention_287831d
+
+def add_op_namespace_prefix(op_name: str):
+    """
+    Prefix op by namespace.
+    """
+    return f"_paged_attention_287831d::{op_name}"
build/torch29-cxx11-rocm64-x86_64-linux/paged_attention/_paged_attention_287831d.abi3.so ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0b142366acfa55e943523691850460892ec311161d9dbba4b3ac4dcaa8a96794
+size 121016696
build/torch29-cxx11-rocm64-x86_64-linux/paged_attention/platforms.py ADDED
@@ -0,0 +1,92 @@
+import os
+import random
+from abc import ABC, abstractmethod
+from functools import lru_cache, wraps
+from typing import Callable, ParamSpec, TypeVar
+
+import numpy as np
+import torch
+
+IS_ROCM = torch.version.hip is not None
+IS_MPS = torch.backends.mps.is_available()
+
+
+class Platform(ABC):
+    @classmethod
+    def seed_everything(cls, seed: int) -> None:
+        """
+        Set the seed of each random module.
+        `torch.manual_seed` will set seed on all devices.
+
+        Loosely based on: https://github.com/Lightning-AI/pytorch-lightning/blob/2.4.0/src/lightning/fabric/utilities/seed.py#L20
+        """
+        random.seed(seed)
+        np.random.seed(seed)
+        torch.manual_seed(seed)
+
+    @abstractmethod
+    def get_device_name(self, device_id: int = 0) -> str: ...
+
+    @abstractmethod
+    def is_cuda(self) -> bool: ...
+
+    @abstractmethod
+    def is_rocm(self) -> bool: ...
+
+    @abstractmethod
+    def is_mps(self) -> bool: ...
+
+
+class CudaPlatform(Platform):
+    @classmethod
+    @lru_cache(maxsize=8)
+    def get_device_name(cls, device_id: int = 0) -> str:
+        return torch.cuda.get_device_name(0)
+
+    def is_cuda(self) -> bool:
+        return True
+
+    def is_rocm(self) -> bool:
+        return False
+
+    def is_mps(self) -> bool:
+        return False
+
+
+class RocmPlatform(Platform):
+    @classmethod
+    @lru_cache(maxsize=8)
+    def get_device_name(cls, device_id: int = 0) -> str:
+        return torch.cuda.get_device_name(device_id)
+
+    def is_cuda(self) -> bool:
+        return False
+
+    def is_rocm(self) -> bool:
+        return True
+
+    def is_mps(self) -> bool:
+        return False
+
+
+class MpsPlatform(Platform):
+    @classmethod
+    @lru_cache(maxsize=8)
+    def get_device_name(cls, device_id: int = 0) -> str:
+        return torch.cuda.get_device_name(device_id)
+
+    def is_cuda(self) -> bool:
+        return False
+
+    def is_rocm(self) -> bool:
+        return False
+
+    def is_mps(self) -> bool:
+        return True
+
+current_platform = (
+    RocmPlatform() if IS_ROCM else
+    MpsPlatform() if IS_MPS else
+    CudaPlatform() if torch.cuda.is_available() else
+    None
+)