Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -56,6 +56,9 @@ check-lit-c:
check-lit-nvgpu:
[ `uname -s` = Darwin ] || env XTC_MLIR_TARGET=nvgpu lit -v tests/filecheck/backends tests/filecheck/mlir_loop tests/filecheck/evaluation

check-lit-mppa:
[ `uname -s` = Darwin ] || env XTC_MLIR_TARGET=mppa lit -v -j 1 tests/filecheck/backends/target_mppa

check-pytest:
scripts/pytest/run_pytest.sh -v

Expand Down
21 changes: 20 additions & 1 deletion docs/develop/optional_backends.md
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,7 @@ It is installed automatically using the mlir_requirements.txt file.
For manual building and installation, please follow the README at https://gitlab.inria.fr/CORSE/xtc-mlir.
Note: The prebuilt XTC-MLIR package comes with its own version of the libLLVM.so

#### MLIR SDist extension
### MLIR SDist extension

The SDist extension provides distribution primitives. To install SDist:

Expand All @@ -46,6 +46,25 @@ The SDist extension provides distribution primitives. To install SDist:

Note that SDist is currently an Inria internal project (cf JIR section on how to access).

### Kalray MPPA target

XTC supports the Kalray MPPA (Coolidge v2) as a target. To use this target, you need several dependencies:
- The Kalray Core Toolchain (https://www.kalrayinc.com/products/software/) must be installed and sourced
- The Mlir-Mppa backend must be installed (currently an Inria/Kalray internal project)
- SDist must be installed

Then, you can test the installation using:
python tests/filecheck/backends/target_mppa/test_matmul_mlir_mppa.py

### Nvidia GPU target

XTC supports Nvidia GPUs as an experimental target. To use this target, you need several dependencies:
- The Cuda toolkit (https://docs.nvidia.com/cuda/cuda-installation-guide-linux/) must be installed and sourced
- LLVM-project with NVPTX target and Cuda runner for Mlir (enabled by default with the Mlir wheels for XTC)

Then, you can test the installation using:
python tests/filecheck/evaluation/test_matmul_pmu_counters_gpu.py

## TVM development version

Note that, if compiling TVM v0.16+ from source instead of using our wheels,
Expand Down
10 changes: 8 additions & 2 deletions src/xtc/backends/mlir/MlirCompiler.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,13 +28,14 @@
get_default_target,
)
from xtc.utils.ext_tools import get_shlib_extension
from xtc.itf.runtime.common import CommonRuntimeInterface


class MlirCompiler(itf.comp.Compiler):
def __init__(
self,
backend: "backend.MlirBackend",
target: str | None = None,
target: str | CommonRuntimeInterface | None = None,
**kwargs: Any,
):
self._backend = backend
Expand All @@ -44,9 +45,12 @@ def __init__(
self._config = MlirConfig(**kwargs)
if target is None:
self._target = get_default_target()(self._config)
else:
elif isinstance(target, str):
self._target = get_target_from_name(target)(self._config)
elif isinstance(target, CommonRuntimeInterface):
self._target = get_target_from_name(target.target_name())(self._config)
assert self._target is not None
self._runtime_target = target
self._compiler_kwargs = kwargs

@property
Expand Down Expand Up @@ -158,6 +162,8 @@ def _save_temp(self, fname: str, content: Any) -> None:
outf.write(str(content))

def _register_mlir_extensions(self) -> None:
for extension in self._config.required_extensions:
self._mlir_program.require_extension(extension, weak=False)
if self._mlir_schedule is not None:
for extension, weak in self._mlir_schedule.mlir_extensions.items():
self._mlir_program.require_extension(extension, weak=weak)
Expand Down
1 change: 1 addition & 0 deletions src/xtc/backends/mlir/MlirConfig.py
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,7 @@ class MlirConfig:
arch: str = "native"
cpu: str = "native"
selected_device: int | None = None
required_extensions: list[str] = field(default_factory=list)

def __post_init__(self):
object.__setattr__(
Expand Down
28 changes: 24 additions & 4 deletions src/xtc/backends/mlir/MlirGraphBackend.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,14 @@

from xdsl.dialects.func import FuncOp as xdslFuncOp
from xdsl.dialects import func, memref
from xdsl.dialects.builtin import MemRefType, f32, f64
from xdsl.dialects.builtin import (
MemRefType,
f32,
f64,
ArrayAttr,
UnitAttr,
DictionaryAttr,
)
from xdsl.ir import Region, Block, Operation
from xdsl.builder import ImplicitBuilder

Expand Down Expand Up @@ -97,6 +104,14 @@ def _init_from_graph(
self._xdsl_type_from_tensortype(cast(XTCTensorType, tensor_type))
for tensor_type in [*inputs_types, *outputs_types]
]
arg_attrs = ArrayAttr(
[
DictionaryAttr(
self._xdsl_attrs_from_tensortype(cast(XTCTensorType, tensor_type))
)
for tensor_type in [*inputs_types, *outputs_types]
]
)
inlined_block = Block(arg_types=params_types)
variables = {
name: arg
Expand All @@ -109,11 +124,11 @@ def _init_from_graph(
with ImplicitBuilder(inlined_block):
func.ReturnOp()
region = Region([inlined_block]) # type: ignore # issue with mypy
payload = xdslFuncOp.from_region(
payload = xdslFuncOp(
name=graph.name,
input_types=params_types,
return_types=[],
function_type=(params_types, []),
region=region,
arg_attrs=arg_attrs,
)
nodes_dict = {}
for attrs in block_attrs:
Expand All @@ -139,6 +154,11 @@ def _xdsl_type_from_tensortype(self, type: XTCTensorType) -> Any:
elt_type, shape = self._xdsl_elt_shape_from_tensortype(type)
return MemRefType(elt_type, shape)

def _xdsl_attrs_from_tensortype(self, type: XTCTensorType):
if type.device is not None:
return {"memref.on_device": UnitAttr()}
return {}

def _np_types_spec(
self, types: list[MemRefType]
) -> list[dict[str, tuple[int, ...] | str]]:
Expand Down
35 changes: 19 additions & 16 deletions src/xtc/backends/mlir/MlirProgram.py
Original file line number Diff line number Diff line change
Expand Up @@ -73,23 +73,26 @@ def parse_and_add_function(
function, context=self.mlir_context
)

# Insert (or not) the noalias attributes
arg_attrs = []
if no_alias:
for _ in payload_func.arguments:
dict_attr = DictAttr.get(
{
"llvm.noalias": UnitAttr.get(context=self.mlir_context),
},
context=self.mlir_context,
with self.mlir_context:
# Insert (or not) the noalias attributes
new_arg_attrs = []
if no_alias:
for arg_attrs in payload_func.arg_attrs:
new_dict = {}
for i in range(len(arg_attrs)):
new_dict[arg_attrs[i].name] = arg_attrs[i].attr
new_dict["llvm.noalias"] = UnitAttr.get(context=self.mlir_context)
new_arg_attrs.append(
DictAttr.get(new_dict, context=self.mlir_context)
)
payload_func.arg_attrs = ArrayAttr.get(
new_arg_attrs, context=self.mlir_context
)
arg_attrs.append(dict_attr)
payload_func.arg_attrs = ArrayAttr.get(arg_attrs, context=self.mlir_context)

# Insert the function in the MLIR program
ip = InsertionPoint.at_block_begin(self.mlir_module.body)
ip.insert(payload_func)
name = str(payload_func.name).replace('"', "")
self.local_functions[str(name)] = payload_func
# Insert the function in the MLIR program
ip = InsertionPoint.at_block_begin(self.mlir_module.body)
ip.insert(payload_func)
name = str(payload_func.name).replace('"', "")
self.local_functions[str(name)] = payload_func

return payload_func
Loading
Loading